2 * linux/arch/arm/mach-omap/dsp/dsp_mem.c
4 * OMAP DSP memory driver
6 * Copyright (C) 2002-2005 Nokia Corporation
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
10 * Conversion to mempool API and ARM MMU section mapping
11 * by Paul Mundt <paul.mundt@nokia.com>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
28 * 2005/06/09: DSP Gateway version 3.3
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/major.h>
35 #include <linux/bootmem.h>
37 #include <linux/interrupt.h>
38 #include <linux/delay.h>
39 #include <linux/mempool.h>
40 #include <linux/platform_device.h>
41 #include <linux/clk.h>
42 #include <asm/uaccess.h>
44 #include <asm/ioctls.h>
46 #include <asm/pgalloc.h>
47 #include <asm/pgtable.h>
48 #include <asm/arch/tc.h>
49 #include <asm/arch/omapfb.h>
50 #include <asm/arch/dsp.h>
51 #include <asm/arch/dsp_common.h>
52 #include "uaccess_dsp.h"
56 #define SZ_1MB 0x100000
57 #define SZ_64KB 0x10000
60 #define is_aligned(adr,align) (!((adr)&((align)-1)))
61 #define ORDER_1MB (20 - PAGE_SHIFT)
62 #define ORDER_64KB (16 - PAGE_SHIFT)
63 #define ORDER_4KB (12 - PAGE_SHIFT)
65 #define PGDIR_MASK (~(PGDIR_SIZE-1))
66 #define PGDIR_ALIGN(addr) (((addr)+PGDIR_SIZE-1)&(PGDIR_MASK))
68 #define dsp_mmu_enable() \
70 omap_writew(DSPMMU_CNTL_MMU_EN | DSPMMU_CNTL_RESET_SW, \
73 #define dsp_mmu_disable() \
74 do { omap_writew(0, DSPMMU_CNTL); } while(0)
75 #define dsp_mmu_flush() \
77 omap_writew(DSPMMU_FLUSH_ENTRY_FLUSH_ENTRY, \
78 DSPMMU_FLUSH_ENTRY); \
80 #define __dsp_mmu_gflush() \
81 do { omap_writew(DSPMMU_GFLUSH_GFLUSH, DSPMMU_GFLUSH); } while(0)
82 #define __dsp_mmu_itack() \
83 do { omap_writew(DSPMMU_IT_ACK_IT_ACK, DSPMMU_IT_ACK); } while(0)
85 #define EMIF_PRIO_LB_MASK 0x0000f000
86 #define EMIF_PRIO_LB_SHIFT 12
87 #define EMIF_PRIO_DMA_MASK 0x00000f00
88 #define EMIF_PRIO_DMA_SHIFT 8
89 #define EMIF_PRIO_DSP_MASK 0x00000070
90 #define EMIF_PRIO_DSP_SHIFT 4
91 #define EMIF_PRIO_MPU_MASK 0x00000007
92 #define EMIF_PRIO_MPU_SHIFT 0
93 #define set_emiff_dma_prio(prio) \
95 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
96 ~EMIF_PRIO_DMA_MASK) | \
97 ((prio) << EMIF_PRIO_DMA_SHIFT), \
98 OMAP_TC_OCPT1_PRIOR); \
107 unsigned int valid:1;
108 unsigned int cntnu:1; /* grouping */
109 int usecount; /* reference count by mmap */
110 enum exmap_type type;
111 void *buf; /* virtual address of the buffer,
112 * i.e. 0xc0000000 - */
113 void *vadr; /* DSP shadow space,
114 * i.e. 0xe0000000 - 0xe0ffffff */
117 #define DSPMMU_TLB_LINES 32
118 static struct exmap_tbl exmap_tbl[DSPMMU_TLB_LINES];
119 static DECLARE_RWSEM(exmap_sem);
121 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
122 static struct omapfb_notifier_block *omapfb_nb;
123 static int omapfb_ready;
126 static int dsp_exunmap(unsigned long dspadr);
128 static void *dspvect_page;
129 static unsigned long dsp_fault_adr;
130 static struct mem_sync_struct mem_sync;
/*
 * Take an element straight off the pool's reserved list when one is
 * available; otherwise fall back to the regular mempool_alloc() path.
 * NOTE(review): this view of the file has interior lines elided —
 * confirm details against the complete source.
 */
132 static void *mempool_alloc_from_pool(mempool_t *pool,
133 unsigned int __nocast gfp_mask)
135 spin_lock_irq(&pool->lock);
136 if (likely(pool->curr_nr)) {
/* pop the last reserved element under the pool lock */
137 void *element = pool->elements[--pool->curr_nr];
138 spin_unlock_irq(&pool->lock);
142 spin_unlock_irq(&pool->lock);
/* reserve empty: defer to the generic allocator */
143 return mempool_alloc(pool, gfp_mask);
146 static __inline__ unsigned long lineup_offset(unsigned long adr,
150 unsigned long newadr;
152 newadr = (adr & ~mask) | (ref & mask);
/*
 * Bump the ARM-side sequence counter (ad_arm) in each configured
 * mem_sync region (DARAM/SARAM/SDRAM), with DSP memory access enabled
 * around the update.  NOTE(review): guard conditions between these
 * increments are elided in this view — verify against the full file.
 */
158 int dsp_mem_sync_inc(void)
160 if (dsp_mem_enable((void *)dspmem_base) < 0)
163 mem_sync.DARAM->ad_arm++;
165 mem_sync.SARAM->ad_arm++;
167 mem_sync.SDRAM->ad_arm++;
168 dsp_mem_disable((void *)dspmem_base);
173 * dsp_mem_sync_config() is called from mbx1 workqueue
/*
 * Install a new mem_sync configuration coming from the DSP side.
 * Each sync_seq pointer must live in the memory type its name implies
 * (DARAM/SARAM/external); otherwise the configuration is rejected
 * with a diagnostic.
 */
175 int dsp_mem_sync_config(struct mem_sync_struct *sync)
177 size_t sync_seq_sz = sizeof(struct sync_seq);
179 #ifdef OLD_BINARY_SUPPORT
/* old binaries pass NULL: clear the configuration instead */
181 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
/* validate that each region pointer is in the expected memory type */
185 if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
186 (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
187 (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
189 "omapdsp: mem_sync address validation failure!\n"
190 " mem_sync.DARAM = 0x%p,\n"
191 " mem_sync.SARAM = 0x%p,\n"
192 " mem_sync.SDRAM = 0x%p,\n",
193 sync->DARAM, sync->SARAM, sync->SDRAM);
/* validated: adopt the new configuration */
196 memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
200 static mempool_t *kmem_pool_1M;
201 static mempool_t *kmem_pool_64K;
/*
 * mempool allocator callback: 'order' (page order) is smuggled through
 * the pool_data pointer; allocate DMA-capable pages of that order.
 */
203 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
205 return (void *)__get_dma_pages(gfp, (unsigned int)order);
/* mempool free callback: counterpart of dsp_pool_alloc(). */
208 static void dsp_pool_free(void *buf, void *order)
210 free_pages((unsigned long)buf, (unsigned int)order);
/*
 * Tear down both reserved kernel-memory pools (64KB and 1MB chunks).
 * NOTE(review): NULL-checks around these destroys are elided in this
 * view — the pools may legitimately be unallocated.
 */
213 static void dsp_kmem_release(void)
216 mempool_destroy(kmem_pool_64K);
217 kmem_pool_64K = NULL;
221 mempool_destroy(kmem_pool_1M);
/*
 * Pre-reserve DMA-able kernel memory for later exmap allocations:
 * 'size' is split into 1MB chunks and 64KB chunks, each backed by its
 * own mempool (created on first use, resized on subsequent calls).
 * Rejects sizes not 64KB-aligned or larger than the DSP address space.
 */
226 static int dsp_kmem_reserve(unsigned long size)
228 unsigned long len = size;
230 /* alignment check */
231 if (!is_aligned(size, SZ_64KB)) {
233 "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
236 if (size > DSPSPACE_SIZE) {
238 "omapdsp: size(0x%lx) is larger than DSP memory space "
239 "size (0x%x.\n", size, DSPSPACE_SIZE);
/* 1MB chunks first */
243 if (size >= SZ_1MB) {
246 if (likely(!kmem_pool_1M))
247 kmem_pool_1M = mempool_create(nr,
/* pool exists: grow its reserve instead */
252 mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
/* strip the MB-granular part handled above */
255 size &= ~(0xf << 20);
/* then 64KB chunks for the remainder */
258 if (size >= SZ_64KB) {
261 if (likely(!kmem_pool_64K))
262 kmem_pool_64K = mempool_create(nr,
267 mempool_resize(kmem_pool_64K,
268 kmem_pool_64K->min_nr + nr, GFP_KERNEL);
270 size &= ~(0xf << 16);
/*
 * Release an exmap buffer: drop the PageReserved marks that were set
 * for mmap, then return the buffer to the matching mempool when one
 * exists for its order, else free the pages directly.
 */
279 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
281 struct page *page, *ps, *pe;
283 ps = virt_to_page(buf);
284 pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
286 for (page = ps; page < pe; page++)
287 ClearPageReserved(page);
/* prefer the reserve pools so reserved memory is not lost */
290 if ((order == ORDER_64KB) && likely(kmem_pool_64K))
291 mempool_free((void *)buf, kmem_pool_64K);
292 else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
293 mempool_free((void *)buf, kmem_pool_1M);
295 free_pages(buf, order);
/*
 * Install one 4KB PTE for virt->phys in the kernel page tables
 * (init_mm), allocating the PTE page on demand.
 */
300 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
307 pgd = pgd_offset_k(virt);
308 pud = pud_offset(pgd, virt);
309 pmd = pmd_offset(pud, virt);
311 if (pmd_none(*pmd)) {
312 pte = pte_alloc_one_kernel(&init_mm, 0);
316 /* note: two PMDs will be set */
317 pmd_populate_kernel(&init_mm, pmd, pte);
320 pte = pte_offset_kernel(pmd, virt);
321 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
/*
 * Install a 1MB ARM section mapping for virt->phys directly in the PMD.
 * NOTE(review): the odd-MB handling after the (virt & (1 << 20)) test
 * is elided in this view — confirm against the full file.
 */
325 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
331 pgd = pgd_offset_k(virt);
332 pud = pud_alloc(&init_mm, pgd, virt);
333 pmd = pmd_alloc(&init_mm, pud, virt);
335 if (virt & (1 << 20))
339 /* No good, fall back on smaller mappings. */
342 *pmd = __pmd(phys | prot);
343 flush_pmd_entry(pmd);
/*
 * Map [virt, virt+size) to physical memory in the ARM MMU on behalf of
 * the DSP shadow space: 4KB PTEs up to the first MB boundary, then 1MB
 * sections for the bulk, then PTEs for the tail.  Mappings are
 * uncached (device/IO semantics).
 */
351 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
359 "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
362 prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
363 L_PTE_DIRTY | L_PTE_WRITE);
365 prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
366 PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
/* pre-ARMv6 section descriptors need bit 4 set */
368 if (cpu_architecture() <= CPU_ARCH_ARMv5)
369 prot_sect |= PMD_BIT4;
/* leading unaligned part: map page by page until MB-aligned */
373 while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
374 exmap_alloc_pte(virt, virt + off, prot_pte);
380 /* XXX: Not yet.. confuses dspfb -- PFM. */
/* aligned middle part: use half-PGDIR (1MB) section mappings */
382 while (size >= (PGDIR_SIZE / 2)) {
383 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
386 virt += (PGDIR_SIZE / 2);
387 size -= (PGDIR_SIZE / 2);
/* trailing sub-MB part: back to 4KB PTEs */
391 while (size >= PAGE_SIZE) {
392 exmap_alloc_pte(virt, virt + off, prot_pte);
404 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
408 pte = pte_offset_map(pmd, addr);
413 pte_clear(&init_mm, addr, pte);
414 } while (pte++, addr += PAGE_SIZE, addr != end);
420 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
425 pmd = pmd_offset(pud, addr);
427 next = pmd_addr_end(addr, end);
429 if (addr & (1 << 20))
432 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
434 clean_pmd_entry(pmd);
438 if (pmd_none_or_clear_bad(pmd))
441 exmap_clear_pte_range(pmd, addr, next);
442 } while (pmd++, addr = next, addr != end);
446 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
451 pud = pud_offset(pgd, addr);
453 next = pud_addr_end(addr, end);
454 if (pud_none_or_clear_bad(pud))
457 exmap_clear_pmd_range(pud, addr, next);
458 } while (pud++, addr = next, addr != end);
/*
 * Remove the ARM MMU mappings for [virt, virt+size): walk the kernel
 * pgd range and clear each pud/pmd/pte via the exmap_clear_* helpers.
 */
461 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
463 unsigned long next, end;
467 "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
470 pgd = pgd_offset_k(virt);
473 next = pgd_addr_end(virt, end);
474 if (pgd_none_or_clear_bad(pgd))
477 exmap_clear_pud_range(pgd, virt, next);
478 } while (pgd++, virt = next, virt != end);
/*
 * Return nonzero when [vadr, vadr+len) is entirely covered by valid
 * exmap_tbl entries.  A partially covering entry consumes its portion
 * and the scan restarts for the remainder.
 */
481 static int exmap_valid(void *vadr, size_t len)
483 /* exmap_sem should be held before calling this function */
487 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
489 unsigned long mapsize;
490 struct exmap_tbl *ent = &exmap_tbl[i];
494 mapadr = (void *)ent->vadr;
495 mapsize = 1 << (ent->order + PAGE_SHIFT);
496 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
497 if (vadr + len <= mapadr + mapsize) {
498 /* this map covers whole address. */
502 * this map covers partially.
503 * check rest portion.
505 len -= mapadr + mapsize - vadr;
506 vadr = mapadr + mapsize;
/*
 * Classify [vadr, vadr+len): DARAM, SARAM, externally mapped memory,
 * or CROSSING when the range straddles a region boundary.  External
 * ranges are checked against exmap_tbl under the read lock.
 */
515 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
517 void *ds = (void *)daram_base;
518 void *de = (void *)daram_base + daram_size;
519 void *ss = (void *)saram_base;
520 void *se = (void *)saram_base + saram_size;
523 if ((vadr >= ds) && (vadr < de)) {
525 return MEM_TYPE_CROSSING;
527 return MEM_TYPE_DARAM;
528 } else if ((vadr >= ss) && (vadr < se)) {
530 return MEM_TYPE_CROSSING;
532 return MEM_TYPE_SARAM;
534 down_read(&exmap_sem);
535 if (exmap_valid(vadr, len))
536 ret = MEM_TYPE_EXTERN;
544 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
546 if (dsp_mem_type(p, len) <= 0) {
552 vsprintf(s, fmt, args);
555 "omapdsp: %s address(0x%p) and size(0x%x) is "
557 " (crossing different type of memories, or \n"
558 " external memory space where no "
559 "actual memory is mapped)\n",
569 * exmap_use(), unuse():
570 * when the mapped area is exported to user space with mmap,
571 * the usecount is incremented.
572 * while the usecount > 0, that area can't be released.
574 void exmap_use(void *vadr, size_t len)
578 down_write(&exmap_sem);
579 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
581 unsigned long mapsize;
582 struct exmap_tbl *ent = &exmap_tbl[i];
586 mapadr = (void *)ent->vadr;
587 mapsize = 1 << (ent->order + PAGE_SHIFT);
588 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
592 up_write(&exmap_sem);
595 void exmap_unuse(void *vadr, size_t len)
599 down_write(&exmap_sem);
600 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
602 unsigned long mapsize;
603 struct exmap_tbl *ent = &exmap_tbl[i];
607 mapadr = (void *)ent->vadr;
608 mapsize = 1 << (ent->order + PAGE_SHIFT);
609 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
613 up_write(&exmap_sem);
618 * returns physical address, and sets len to valid length
/*
 * Translate a DSP shadow-space virtual address to a physical address.
 * Internal DSP memory is identity-ish mapped; external addresses are
 * resolved through exmap_tbl.  *len is clipped to the bytes remaining
 * in the containing mapping.
 */
620 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
624 if (is_dsp_internal_mem(vadr)) {
626 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
627 return (unsigned long)vadr;
631 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
633 unsigned long mapsize;
634 struct exmap_tbl *ent = &exmap_tbl[i];
638 mapadr = (void *)ent->vadr;
639 mapsize = 1 << (ent->order + PAGE_SHIFT);
640 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
641 *len = mapadr + mapsize - vadr;
/* physical = buffer phys base + offset into the mapping */
642 return __pa(ent->buf) + vadr - mapadr;
646 /* valid mapping not found */
/*
 * Return the CAM_L virtual-address tag mask corresponding to a TLB
 * entry's page-size (SLST) field: larger pages need fewer VA tag bits.
 */
653 static __inline__ unsigned short get_cam_l_va_mask(unsigned short slst)
656 case DSPMMU_CAM_L_SLST_1MB:
657 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
658 DSPMMU_CAM_L_VA_TAG_L2_MASK_1MB;
659 case DSPMMU_CAM_L_SLST_64KB:
660 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
661 DSPMMU_CAM_L_VA_TAG_L2_MASK_64KB;
662 case DSPMMU_CAM_L_SLST_4KB:
663 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
664 DSPMMU_CAM_L_VA_TAG_L2_MASK_4KB;
665 case DSPMMU_CAM_L_SLST_1KB:
666 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
667 DSPMMU_CAM_L_VA_TAG_L2_MASK_1KB;
/*
 * Read the DSP MMU LOCK register and split out the lock base and
 * victim entry indices.  NOTE(review): callers pass NULL for fields
 * they don't need — NULL guards are elided in this view.
 */
672 static __inline__ void get_tlb_lock(int *base, int *victim)
674 unsigned short lock = omap_readw(DSPMMU_LOCK);
676 *base = (lock & DSPMMU_LOCK_BASE_MASK)
677 >> DSPMMU_LOCK_BASE_SHIFT;
679 *victim = (lock & DSPMMU_LOCK_VICTIM_MASK)
680 >> DSPMMU_LOCK_VICTIM_SHIFT;
/* Program the DSP MMU LOCK register with a lock base and victim index. */
683 static __inline__ void set_tlb_lock(int base, int victim)
685 omap_writew((base << DSPMMU_LOCK_BASE_SHIFT) |
686 (victim << DSPMMU_LOCK_VICTIM_SHIFT), DSPMMU_LOCK);
/*
 * Read one DSP TLB entry: select it via the lock/victim registers,
 * trigger a TLB read, then fetch the CAM/RAM halves the caller asked
 * for (NULL output pointers are skipped — guards elided in this view).
 */
689 static __inline__ void __read_tlb(unsigned short lbase, unsigned short victim,
690 unsigned short *cam_h, unsigned short *cam_l,
691 unsigned short *ram_h, unsigned short *ram_l)
694 set_tlb_lock(lbase, victim);
696 /* read a TLB entry */
697 omap_writew(DSPMMU_LD_TLB_RD, DSPMMU_LD_TLB);
700 *cam_h = omap_readw(DSPMMU_READ_CAM_H);
702 *cam_l = omap_readw(DSPMMU_READ_CAM_L);
704 *ram_h = omap_readw(DSPMMU_READ_RAM_H);
706 *ram_l = omap_readw(DSPMMU_READ_RAM_L);
/*
 * Load one DSP TLB entry: write the CAM/RAM register pairs, flush the
 * stale entry, then issue the TLB load command.
 */
709 static __inline__ void __load_tlb(unsigned short cam_h, unsigned short cam_l,
710 unsigned short ram_h, unsigned short ram_l)
712 omap_writew(cam_h, DSPMMU_CAM_H);
713 omap_writew(cam_l, DSPMMU_CAM_L);
714 omap_writew(ram_h, DSPMMU_RAM_H);
715 omap_writew(ram_l, DSPMMU_RAM_L);
717 /* flush the entry */
720 /* load a TLB entry */
721 omap_writew(DSPMMU_LD_TLB_LD, DSPMMU_LD_TLB);
/*
 * Load a locked DSP TLB entry mapping vadr->padr with the given page
 * size (slst), preservation bit and access permission.  Scans the
 * locked region for an invalid (reusable) victim slot, validates that
 * vadr is aligned to the page-size boundary, then programs the entry
 * and advances the lock base.
 */
724 static int dsp_mmu_load_tlb(unsigned long vadr, unsigned long padr,
725 unsigned short slst, unsigned short prsvd,
729 unsigned short cam_l_va_mask;
731 clk_enable(dsp_ck_handle);
733 get_tlb_lock(&lbase, NULL);
/* look for an invalid entry below the lock base to reuse */
734 for (victim = 0; victim < lbase; victim++) {
735 unsigned short cam_l;
737 /* read a TLB entry */
738 __read_tlb(lbase, victim, NULL, &cam_l, NULL, NULL);
739 if (!(cam_l & DSPMMU_CAM_L_V))
742 set_tlb_lock(lbase, victim);
745 /* The last (31st) entry cannot be locked? */
747 printk(KERN_ERR "omapdsp: TLB is full.\n");
751 cam_l_va_mask = get_cam_l_va_mask(slst);
/* vadr must have no bits outside the VA tag for this page size */
753 ~(DSPMMU_CAM_H_VA_TAG_H_MASK << 22 |
754 (unsigned long)cam_l_va_mask << 6)) {
756 "omapdsp: mapping vadr (0x%06lx) is not "
757 "aligned boundary\n", vadr);
761 __load_tlb(vadr >> 22, (vadr >> 6 & cam_l_va_mask) | prsvd | slst,
762 padr >> 16, (padr & DSPMMU_RAM_L_RAM_LSB_MASK) | ap);
764 /* update lock base */
767 set_tlb_lock(lbase, lbase);
769 clk_disable(dsp_ck_handle);
/*
 * Invalidate the locked DSP TLB entry matching vadr: scan the locked
 * entries, decode each entry's VA from its CAM halves, flush the match
 * and rewrite the lock base past the highest remaining valid entry.
 */
773 static int dsp_mmu_clear_tlb(unsigned long vadr)
779 clk_enable(dsp_ck_handle);
781 get_tlb_lock(&lbase, NULL);
782 for (i = 0; i < lbase; i++) {
783 unsigned short cam_h, cam_l;
784 unsigned short cam_l_va_mask, cam_vld, slst;
785 unsigned long cam_va;
787 /* read a TLB entry */
788 __read_tlb(lbase, i, &cam_h, &cam_l, NULL, NULL);
790 cam_vld = cam_l & DSPMMU_CAM_L_V;
794 slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
795 cam_l_va_mask = get_cam_l_va_mask(slst);
/* reconstruct the entry's virtual address from CAM_H/CAM_L tags */
796 cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
797 (unsigned long)(cam_l & cam_l_va_mask) << 6;
800 /* flush the entry */
806 /* set new lock base */
807 set_tlb_lock(max_valid+1, max_valid+1);
809 clk_disable(dsp_ck_handle);
813 static void dsp_mmu_gflush(void)
815 clk_enable(dsp_ck_handle);
820 clk_disable(dsp_ck_handle);
826 * OMAP_DSP_MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
827 * In this case, the buffer for DSP is allocated in this routine,
829 * On the other hand, for example - frame buffer sharing, calls
830 * this function with padr set. It means some known address space
831 * pointed with padr is going to be shared with DSP.
833 static int dsp_exmap(unsigned long dspadr, unsigned long padr,
834 unsigned long size, enum exmap_type type)
838 unsigned int order = 0;
840 unsigned int cntnu = 0;
841 unsigned long _dspadr = dspadr;
842 unsigned long _padr = padr;
843 void *_vadr = dspbyte_to_virt(dspadr);
844 unsigned long _size = size;
845 struct exmap_tbl *exmap_ent;
849 #define MINIMUM_PAGESZ SZ_4KB
853 if (!is_aligned(size, MINIMUM_PAGESZ)) {
855 "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
858 if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
860 "omapdsp: DSP address(0x%lx) is not aligned.\n", dspadr);
863 if (!is_aligned(padr, MINIMUM_PAGESZ)) {
865 "omapdsp: physical address(0x%lx) is not aligned.\n",
870 /* address validity check */
871 if ((dspadr < dspmem_size) ||
872 (dspadr >= DSPSPACE_SIZE) ||
873 ((dspadr + size > DSP_INIT_PAGE) &&
874 (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
876 "omapdsp: illegal address/size for dsp_exmap().\n");
880 down_write(&exmap_sem);
883 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
884 unsigned long mapsize;
885 struct exmap_tbl *tmp_ent = &exmap_tbl[i];
889 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
890 if ((_vadr + size > tmp_ent->vadr) &&
891 (_vadr < tmp_ent->vadr + mapsize)) {
892 printk(KERN_ERR "omapdsp: exmap page overlap!\n");
893 up_write(&exmap_sem);
900 /* Are there any free TLB lines? */
901 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
902 if (!exmap_tbl[i].valid)
905 printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
910 exmap_ent = &exmap_tbl[i];
912 if ((_size >= SZ_1MB) &&
913 (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
914 is_aligned(_dspadr, SZ_1MB)) {
916 slst = DSPMMU_CAM_L_SLST_1MB;
917 } else if ((_size >= SZ_64KB) &&
918 (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
919 is_aligned(_dspadr, SZ_64KB)) {
921 slst = DSPMMU_CAM_L_SLST_64KB;
924 slst = DSPMMU_CAM_L_SLST_4KB;
927 order = get_order(unit);
929 /* buffer allocation */
930 if (type == EXMAP_TYPE_MEM) {
931 struct page *page, *ps, *pe;
933 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
934 buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
935 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
936 buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
938 buf = (void *)__get_dma_pages(GFP_KERNEL, order);
945 /* mark the pages as reserved; this is needed for mmap */
946 ps = virt_to_page(buf);
947 pe = virt_to_page(buf + unit);
949 for (page = ps; page < pe; page++)
950 SetPageReserved(page);
956 * mapping for ARM MMU:
957 * we should not access to the allocated memory through 'buf'
958 * since this area should not be cashed.
960 status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
964 /* loading DSP TLB entry */
965 status = dsp_mmu_load_tlb(_dspadr, _padr, slst, 0, DSPMMU_RAM_L_AP_FA);
967 exmap_clear_armmmu((unsigned long)_vadr, unit);
971 exmap_ent->buf = buf;
972 exmap_ent->vadr = _vadr;
973 exmap_ent->order = order;
974 exmap_ent->valid = 1;
975 exmap_ent->cntnu = cntnu;
976 exmap_ent->type = type;
977 exmap_ent->usecount = 0;
979 if ((_size -= unit) == 0) { /* normal completion */
980 up_write(&exmap_sem);
986 _padr = padr ? _padr + unit : 0;
991 up_write(&exmap_sem);
993 dsp_mem_free_pages((unsigned long)buf, order);
/*
 * Undo the ARM side of an exmap entry: clear its ARM MMU mapping and
 * free the backing buffer (for MEM-type entries) or unregister the
 * framebuffer client (for FB-type entries).  Returns the mapping size.
 */
998 static unsigned long unmap_free_arm(struct exmap_tbl *ent)
1002 /* clearing ARM MMU */
1003 size = 1 << (ent->order + PAGE_SHIFT);
1004 exmap_clear_armmmu((unsigned long)ent->vadr, size);
1006 /* freeing allocated memory */
1007 if (ent->type == EXMAP_TYPE_MEM) {
1008 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1010 "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1013 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1014 else if (ent->type == EXMAP_TYPE_FB) {
1017 status = omapfb_unregister_client(omapfb_nb);
1019 printk("omapfb_unregister_client(): "
1022 printk("omapfb_runegister_client(): "
1023 "failure(%d)\n", status);
/*
 * Unmap a DSP external mapping starting at dspadr: locate the entry by
 * its shadow-space address, refuse while mmap references exist, clear
 * the DSP TLB entry, release the ARM mapping/buffer, and walk on
 * through cntnu-grouped entries that belong to the same request.
 */
1034 static int dsp_exunmap(unsigned long dspadr)
1039 struct exmap_tbl *ent;
1042 vadr = dspbyte_to_virt(dspadr);
1043 down_write(&exmap_sem);
1044 for (idx = 0; idx < DSPMMU_TLB_LINES; idx++) {
1045 ent = &exmap_tbl[idx];
1048 if (ent->vadr == vadr)
1051 up_write(&exmap_sem);
1053 "omapdsp: address %06lx not found in exmap_tbl.\n", dspadr);
/* still mmap'ed into user space: cannot release */
1057 if (ent->usecount > 0) {
1059 "omapdsp: exmap reference count is not 0.\n"
1060 " idx=%d, vadr=%p, order=%d, usecount=%d\n",
1061 idx, ent->vadr, ent->order, ent->usecount);
1062 up_write(&exmap_sem);
1065 /* clearing DSP TLB entry */
1066 dsp_mmu_clear_tlb(dspadr);
1068 /* clear ARM MMU and free buffer */
1069 size = unmap_free_arm(ent);
1073 /* we don't free PTEs */
1076 flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
1078 /* check if next mapping is in same group */
1079 if (++idx == DSPMMU_TLB_LINES)
1080 goto up_out; /* normal completion */
1081 ent = &exmap_tbl[idx];
1082 if (!ent->valid || !ent->cntnu)
1083 goto up_out; /* normal completion */
/* grouped entries must be contiguous in the shadow space */
1087 if (ent->vadr == vadr)
1088 goto found_map; /* continue */
1091 "omapdsp: illegal exmap_tbl grouping!\n"
1092 "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1093 vadr, idx, ent->vadr);
1094 up_write(&exmap_sem);
1098 up_write(&exmap_sem);
/*
 * Forcibly release every external mapping except exmap_tbl[0] (the DSP
 * vector page), then flush the kernel TLB over the whole external
 * region of the shadow space.
 */
1102 static void exmap_flush(void)
1104 struct exmap_tbl *ent;
1107 down_write(&exmap_sem);
1109 /* clearing DSP TLB entry */
1112 /* exmap_tbl[0] should be preserved */
1113 for (i = 1; i < DSPMMU_TLB_LINES; i++) {
1114 ent = &exmap_tbl[i];
1116 unmap_free_arm(ent);
1122 flush_tlb_kernel_range(dspmem_base + dspmem_size,
1123 dspmem_base + DSPSPACE_SIZE);
1124 up_write(&exmap_sem);
1127 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1129 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1130 #endif /* CONFIG_FB */
1132 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1133 static int omapfb_notifier_cb(struct omapfb_notifier_block *omapfb_nb,
1134 unsigned long event, struct omapfb_device *fbdev)
1137 printk("omapfb_notifier_cb(): event = %s\n",
1138 (event == OMAPFB_EVENT_READY) ? "READY" :
1139 (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1140 if (event == OMAPFB_EVENT_READY)
1142 else if (event == OMAPFB_EVENT_DISABLED)
1148 static int dsp_fbexport(unsigned long *dspadr)
1150 unsigned long dspadr_actual;
1151 unsigned long padr_sys, padr, fbsz_sys, fbsz;
1153 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1157 printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1159 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1162 "omapdsp: frame buffer has been exported already!\n");
1167 if (num_registered_fb == 0) {
1168 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1171 if (num_registered_fb != 1) {
1173 "omapdsp: %d frame buffers found. we use first one.\n",
1176 padr_sys = registered_fb[0]->fix.smem_start;
1177 fbsz_sys = registered_fb[0]->fix.smem_len;
1178 if (fbsz_sys == 0) {
1180 "omapdsp: framebuffer doesn't seem to be configured "
1181 "correctly! (size=0)\n");
1186 * align padr and fbsz to 4kB boundary
1187 * (should be noted to the user afterwards!)
1189 padr = padr_sys & ~(SZ_4KB-1);
1190 fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1192 /* line up dspadr offset with padr */
1194 (fbsz > SZ_1MB) ? lineup_offset(*dspadr, padr, SZ_1MB-1) :
1195 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1196 /* (fbsz > SZ_4KB) ? */ *dspadr;
1197 if (dspadr_actual != *dspadr)
1199 "omapdsp: actual dspadr for FBEXPORT = %08lx\n",
1201 *dspadr = dspadr_actual;
1203 cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1205 printk(KERN_ERR "omapdsp: exmap failure.\n");
1209 if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1211 " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1212 " !! screen base address or size is not aligned in 4kB: !!\n"
1213 " !! actual screen adr = %08lx, size = %08lx !!\n"
1214 " !! exporting adr = %08lx, size = %08lx !!\n"
1215 " !! Make sure that the framebuffer is allocated with 4kB-order! !!\n"
1216 " !! Otherwise DSP can corrupt the kernel memory. !!\n"
1217 " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1218 padr_sys, fbsz_sys, padr, fbsz);
1221 /* increase the DMA priority */
1222 set_emiff_dma_prio(15);
1224 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1225 omapfb_nb = kmalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1226 if (omapfb_nb == NULL) {
1228 "omapdsp: failed to allocate memory for omapfb_nb!\n");
1229 dsp_exunmap(dspadr_actual);
1232 status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1234 printk("omapfb_register_client(): success\n");
1236 printk("omapfb_register_client(): failure(%d)\n", status);
1242 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1244 static int dsp_fbexport(unsigned long *dspadr)
1246 printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1250 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1252 static int dsp_mmu_itack(void)
1254 unsigned long dspadr;
1256 printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1257 if (!dsp_err_mmu_isset()) {
1258 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1261 dspadr = dsp_fault_adr & ~(SZ_4K-1);
1262 dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM); /* FIXME: reserve TLB entry for this */
1263 printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1264 dsp_runlevel(OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY);
1267 dsp_exunmap(dspadr);
1268 dsp_err_mmu_clear();
/*
 * (Re)initialize the DSP MMU: reset it, map the DSP vector page at
 * DSP_INIT_PAGE into the ARM MMU, record it as the permanent
 * exmap_tbl[0] entry, and preload a preserved full-access 4KB DSP TLB
 * entry for it.
 */
1272 static void dsp_mmu_init(void)
1277 clk_enable(dsp_ck_handle);
1278 down_write(&exmap_sem);
1280 dsp_mmu_disable(); /* clear all */
1284 /* mapping for ARM MMU */
1285 phys = __pa(dspvect_page);
1286 virt = dspbyte_to_virt(DSP_INIT_PAGE); /* 0xe0fff000 */
1287 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1288 exmap_tbl[0].buf = dspvect_page;
1289 exmap_tbl[0].vadr = virt;
1290 exmap_tbl[0].usecount = 0;
1291 exmap_tbl[0].order = 0;
1292 exmap_tbl[0].valid = 1;
1293 exmap_tbl[0].cntnu = 0;
1295 /* DSP TLB initialization */
1297 /* preserved, full access */
1298 dsp_mmu_load_tlb(DSP_INIT_PAGE, phys, DSPMMU_CAM_L_SLST_4KB,
1299 DSPMMU_CAM_L_P, DSPMMU_RAM_L_AP_FA);
1300 up_write(&exmap_sem);
1301 clk_disable(dsp_ck_handle);
1304 static void dsp_mmu_shutdown(void)
1307 dsp_mmu_disable(); /* clear all */
1311 * intmem_enable() / disable():
1312 * if the address is in DSP internal memories,
1313 * we send PM mailbox commands so that DSP DMA domain won't go in idle
1314 * when ARM is accessing to those memories.
1316 static int intmem_enable(void)
1321 ret = dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_ENABLE,
1322 DSPREG_ICR_DMA_IDLE_DOMAIN);
1327 static void intmem_disable(void) {
1329 dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_DISABLE,
1330 DSPREG_ICR_DMA_IDLE_DOMAIN);
1334 * dsp_mem_enable() / disable()
1336 int intmem_usecount;
/*
 * Enable access to DSP memory at 'adr': internal memory takes a
 * use-counted omap_dsp_request_mem(); external memory takes the exmap
 * read lock so mappings cannot vanish while in use.
 */
1338 int dsp_mem_enable(void *adr)
1342 if (is_dsp_internal_mem(adr)) {
1343 if (intmem_usecount++ == 0)
1344 ret = omap_dsp_request_mem();
1346 down_read(&exmap_sem);
/* Counterpart of dsp_mem_enable(): drop the usecount or read lock. */
1353 void dsp_mem_disable(void *adr)
1354 if (is_dsp_internal_mem(adr)) {
1355 if (--intmem_usecount == 0)
1356 omap_dsp_release_mem();
1357 up_read(&exmap_sem);
/*
 * Recovery helper: if request/release calls are unbalanced, warn,
 * force intmem_usecount back to zero and release the memory.
 */
1361 void dsp_mem_usecount_clear(void)
1363 if (intmem_usecount != 0) {
1365 "omapdsp: unbalanced memory request/release detected.\n"
1366 " intmem_usecount is not zero at where "
1367 "it should be! ... fixed to be zero.\n");
1368 intmem_usecount = 0;
1369 omap_dsp_release_mem();
1374 * dsp_mem file operations
1376 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1380 mutex_lock(&file->f_dentry->d_inode->i_mutex);
1383 file->f_pos = offset;
1387 file->f_pos += offset;
1393 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
/*
 * read() backend for DSP internal memory: clip the count to the
 * remaining internal space and copy_to_user with the API clock on.
 */
1397 static ssize_t intmem_read(struct file *file, char *buf, size_t count,
1400 unsigned long p = *ppos;
1401 void *vadr = dspbyte_to_virt(p);
1402 ssize_t size = dspmem_size;
1407 clk_enable(api_ck_handle);
1409 if (count > size - p)
1411 if (copy_to_user(buf, vadr, read)) {
1417 clk_disable(api_ck_handle);
/*
 * read() backend for externally mapped DSP memory: reject ranges not
 * covered by exmap, clip to the DSP address space, copy to user.
 */
1421 static ssize_t exmem_read(struct file *file, char *buf, size_t count,
1424 unsigned long p = *ppos;
1425 void *vadr = dspbyte_to_virt(p);
1427 if (!exmap_valid(vadr, count)) {
1429 "omapdsp: DSP address %08lx / size %08x "
1430 "is not valid!\n", p, count);
1433 if (count > DSPSPACE_SIZE - p)
1434 count = DSPSPACE_SIZE - p;
1435 if (copy_to_user(buf, vadr, count))
/*
 * Top-level read(): enable memory access for the target address, then
 * dispatch to the internal- or external-memory backend.
 */
1442 static ssize_t dsp_mem_read(struct file *file, char *buf, size_t count,
1446 void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1448 if (dsp_mem_enable(vadr) < 0)
1450 if (is_dspbyte_internal_mem(*ppos))
1451 ret = intmem_read(file, buf, count, ppos);
1453 ret = exmem_read(file, buf, count, ppos);
1454 dsp_mem_disable(vadr);
/*
 * write() backend for DSP internal memory: mirror of intmem_read(),
 * copying from user space with the API clock enabled.
 */
1459 static ssize_t intmem_write(struct file *file, const char *buf, size_t count,
1462 unsigned long p = *ppos;
1463 void *vadr = dspbyte_to_virt(p);
1464 ssize_t size = dspmem_size;
1469 clk_enable(api_ck_handle);
1471 if (count > size - p)
1473 if (copy_from_user(vadr, buf, written)) {
1479 clk_disable(api_ck_handle);
/*
 * write() backend for externally mapped DSP memory: mirror of
 * exmem_read(), validating exmap coverage before copy_from_user.
 */
1483 static ssize_t exmem_write(struct file *file, const char *buf, size_t count,
1486 unsigned long p = *ppos;
1487 void *vadr = dspbyte_to_virt(p);
1489 if (!exmap_valid(vadr, count)) {
1491 "omapdsp: DSP address %08lx / size %08x "
1492 "is not valid!\n", p, count);
1495 if (count > DSPSPACE_SIZE - p)
1496 count = DSPSPACE_SIZE - p;
1497 if (copy_from_user(vadr, buf, count))
/*
 * Top-level write(): enable memory access for the target address, then
 * dispatch to the internal- or external-memory backend.
 */
1504 static ssize_t dsp_mem_write(struct file *file, const char *buf, size_t count,
1508 void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1510 if (dsp_mem_enable(vadr) < 0)
1512 if (is_dspbyte_internal_mem(*ppos))
1513 ret = intmem_write(file, buf, count, ppos);
1515 ret = exmem_write(file, buf, count, ppos);
1516 dsp_mem_disable(vadr);
/*
 * ioctl dispatcher for the dsp_mem device: MMU (re)init, external
 * map/unmap/flush, framebuffer export, MMU interrupt ack, and kernel
 * memory pool reserve/release.
 */
1521 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1522 unsigned int cmd, unsigned long arg)
1525 case OMAP_DSP_MEM_IOCTL_MMUINIT:
1529 case OMAP_DSP_MEM_IOCTL_EXMAP:
1531 struct omap_dsp_mapinfo mapinfo;
1532 if (copy_from_user(&mapinfo, (void *)arg,
1535 return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1539 case OMAP_DSP_MEM_IOCTL_EXUNMAP:
1540 return dsp_exunmap((unsigned long)arg);
1542 case OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH:
1546 case OMAP_DSP_MEM_IOCTL_FBEXPORT:
1548 unsigned long dspadr;
1550 if (copy_from_user(&dspadr, (void *)arg, sizeof(long)))
1552 ret = dsp_fbexport(&dspadr);
/* return the (possibly realigned) DSP address to the caller */
1553 if (copy_to_user((void *)arg, &dspadr, sizeof(long)))
1558 case OMAP_DSP_MEM_IOCTL_MMUITACK:
1559 return dsp_mmu_itack();
1561 case OMAP_DSP_MEM_IOCTL_KMEM_RESERVE:
1564 if (copy_from_user(&size, (void *)arg, sizeof(long)))
1566 return dsp_kmem_reserve(size);
1569 case OMAP_DSP_MEM_IOCTL_KMEM_RELEASE:
1574 return -ENOIOCTLCMD;
1578 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
1586 static int dsp_mem_open(struct inode *inode, struct file *file)
1588 if (!capable(CAP_SYS_RAWIO))
1594 static int dsp_mem_release(struct inode *inode, struct file *file)
1599 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1601 * fb update functions:
1602 * fbupd_response() is executed by the workqueue.
1603 * fbupd_cb() is called when fb update is done, in interrupt context.
1604 * mbx1_fbupd() is called when KFUNC:FBCTL:UPD is received from DSP.
1606 static void fbupd_response(void *arg)
1610 status = dsp_mbsend(MBCMD(KFUNC), OMAP_DSP_MBCMD_KFUNC_FBCTL,
1611 OMAP_DSP_MBCMD_FBCTL_UPD);
1613 /* FIXME: DSP is busy !! */
1615 "omapdsp: DSP is busy when trying to send FBCTL:UPD "
1620 static DECLARE_WORK(fbupd_response_work, (void (*)(void *))fbupd_response,
1623 static void fbupd_cb(void *arg)
1625 schedule_work(&fbupd_response_work);
/*
 * mbx1_fbctl_upd(): handles KFUNC:FBCTL:UPD received from the DSP.
 * Syncs with the shared system IPBUF, reads the update-window
 * parameters the DSP wrote there, releases the buffer, then starts an
 * asynchronous framebuffer update whose completion callback is
 * fbupd_cb().
 */
1628 void mbx1_fbctl_upd(void)
1630 struct omapfb_update_window win;
/* d[] lives in the DSP-shared IPBUF and is written by the DSP,
 * hence the volatile qualifier. */
1631 volatile unsigned short *buf = ipbuf_sys_da->d;
1633 /* FIXME: try count sometimes exceeds 1000. */
1634 if (sync_with_dsp(&ipbuf_sys_da->s, OMAP_DSP_TID_ANON, 5000) < 0) {
1635 printk(KERN_ERR "mbx: FBCTL:UPD - IPBUF sync failed!\n");
/* Window geometry comes from the IPBUF payload (buf[0..4]; the
 * x/y/width assignments are elided from this listing). */
1641 win.height = buf[3];
1642 win.format = buf[4];
1643 release_ipbuf_pvt(ipbuf_sys_da);
/* Bail out if the external controller (HWA742) is not ready yet. */
1645 if (!omapfb_ready) {
1647 "omapdsp: fbupd() called while HWA742 is not ready!\n");
1650 //printk("calling omapfb_update_window_async()\n");
1651 omapfb_update_window_async(&win, fbupd_cb, NULL);
1654 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
/* Without an external LCD controller there is nothing to update;
 * this stub variant keeps the symbol defined (body elided). */
1656 void mbx1_fbctl_upd(void)
1659 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
/*
 * mmu_show(): sysfs read callback that dumps all 32 DSP MMU TLB
 * entries into buf.  The DSP clock is enabled and exmap_sem is held
 * for reading during the walk; the TLB lock-base/victim pointers are
 * saved first and restored afterwards so the dump does not disturb
 * TLB replacement state.  Returns the formatted length (return
 * statement elided from this listing).
 */
1664 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
1671 clk_enable(dsp_ck_handle);
1672 down_read(&exmap_sem);
/* Save the TLB lock state so it can be restored after the walk. */
1674 get_tlb_lock(&lbase, &victim);
1676 len = sprintf(buf, "p: preserved, v: valid\n"
1677 "ety cam_va ram_pa sz ap\n");
1678 /* 00: p v 0x300000 0x10171800 64KB FA */
1679 for (i = 0; i < 32; i++) {
1680 unsigned short cam_h, cam_l, ram_h, ram_l;
1681 unsigned short cam_l_va_mask, prsvd, cam_vld, slst;
1682 unsigned long cam_va;
1683 unsigned short ram_l_ap;
1684 unsigned long ram_pa;
1685 char *pgsz_str, *ap_str;
1687 /* read a TLB entry */
1688 __read_tlb(lbase, i, &cam_h, &cam_l, &ram_h, &ram_l);
/* Decode slot size, preserved/valid flags and access permission
 * from the CAM/RAM register halves. */
1690 slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
1691 cam_l_va_mask = get_cam_l_va_mask(slst);
1692 pgsz_str = (slst == DSPMMU_CAM_L_SLST_1MB) ? " 1MB":
1693 (slst == DSPMMU_CAM_L_SLST_64KB)? "64KB":
1694 (slst == DSPMMU_CAM_L_SLST_4KB) ? " 4KB":
1696 prsvd = cam_l & DSPMMU_CAM_L_P;
1697 cam_vld = cam_l & DSPMMU_CAM_L_V;
1698 ram_l_ap = ram_l & DSPMMU_RAM_L_AP_MASK;
1699 ap_str = (ram_l_ap == DSPMMU_RAM_L_AP_RO) ? "RO":
1700 (ram_l_ap == DSPMMU_RAM_L_AP_FA) ? "FA":
/* Reassemble the DSP virtual address and the physical address from
 * the high/low register halves. */
1702 cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
1703 (unsigned long)(cam_l & cam_l_va_mask) << 6;
1704 ram_pa = (unsigned long)ram_h << 16 |
1705 (ram_l & DSPMMU_RAM_L_RAM_LSB_MASK);
/* Mark the lock-base and victim positions inline in the dump. */
1708 len += sprintf(buf + len, "lock base = %d\n", lbase);
1710 len += sprintf(buf + len, "victim = %d\n", victim);
1711 /* 00: p v 0x300000 0x10171800 64KB FA */
1712 len += sprintf(buf + len,
1713 "%02d: %c %c 0x%06lx 0x%08lx %s %s\n",
1716 cam_vld ? 'v' : ' ',
1717 cam_va, ram_pa, pgsz_str, ap_str);
1720 /* restore victim entry */
1721 set_tlb_lock(lbase, victim);
1723 up_read(&exmap_sem);
1724 clk_disable(dsp_ck_handle);
/* Read-only sysfs attribute "mmu", backed by mmu_show() above. */
1728 static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
/*
 * exmap_show(): sysfs read callback that dumps the software exmap
 * table (one entry per DSP MMU TLB line): valid and continuation
 * flags, virtual address, buffer pointer, allocation order and use
 * count.  exmap_sem is read-held so the table cannot change mid-dump.
 * Returns the formatted length (return statement elided from this
 * listing).
 */
1730 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
1736 down_read(&exmap_sem);
1737 len = sprintf(buf, "v: valid, c: cntnu\n"
1738 "ety vadr buf od uc\n");
1739 /* 00: v c 0xe0300000 0xc0171800 0 */
1740 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
1741 struct exmap_tbl *ent = &exmap_tbl[i];
1742 /* 00: v c 0xe0300000 0xc0171800 0 */
1743 len += sprintf(buf + len, "%02d: %c %c 0x%8p 0x%8p %2d %2d\n",
1745 ent->valid ? 'v' : ' ',
1746 ent->cntnu ? 'c' : ' ',
1747 ent->vadr, ent->buf, ent->order, ent->usecount);
1750 up_read(&exmap_sem);
/* Read-only sysfs attribute "exmap", backed by exmap_show() above. */
1754 static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
/*
 * kmem_pool_show(): sysfs read callback reporting the reserved kernel
 * memory pools: total reserved bytes (hex), then the number of 1MB and
 * 64KB elements kept in each mempool (their min_nr counts).
 */
1756 static ssize_t kmem_pool_show(struct device *dev,
1757 struct device_attribute *attr, char *buf)
1759 int nr_1M, nr_64K, total;
1761 nr_1M = kmem_pool_1M->min_nr;
1762 nr_64K = kmem_pool_64K->min_nr;
1763 total = nr_1M * SZ_1MB + nr_64K * SZ_64KB;
1765 return sprintf(buf, "0x%x %d %d\n", total, nr_1M, nr_64K);
/* Read-only sysfs attribute "kmem_pool", backed by kmem_pool_show(). */
1768 static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);
1771  * DSP MMU interrupt handler
1776  * We ignore prefetch err.
/* Fault-status bits the handler acts on: permission, TLB-miss and
 * translation faults.  Prefetch errors are deliberately excluded. */
1778 #define MMUFAULT_MASK \
1779 (DSPMMU_FAULT_ST_PERM |\
1780 DSPMMU_FAULT_ST_TLB_MISS |\
1781 DSPMMU_FAULT_ST_TRANS)
/*
 * dsp_mmu_interrupt(): DSP MMU fault interrupt handler.  Reads the
 * fault status and the split (high/low) fault address registers,
 * records the fault address in dsp_fault_adr, logs the fault type, and
 * either hands the fault to the DSP error path (dsp_err_mmu_set) when
 * the DSP runtime is ready, or resets the DSP.  Return statement and
 * some branch structure are elided from this listing.
 */
1782 irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1784 unsigned short status;
1785 unsigned short adh, adl;
1788 status = omap_readw(DSPMMU_FAULT_ST);
1789 adh = omap_readw(DSPMMU_FAULT_AD_H);
1790 adl = omap_readw(DSPMMU_FAULT_AD_L);
/* dp distinguishes data vs. program space; the 22-bit fault address is
 * reassembled from the two 16-bit registers. */
1791 dp = adh & DSPMMU_FAULT_AD_H_DP;
1792 dsp_fault_adr = MKLONG(adh & DSPMMU_FAULT_AD_H_ADR_MASK, adl);
1793 /* if the fault is masked, nothing to do */
1794 if ((status & MMUFAULT_MASK) == 0) {
1795 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
1797 * note: in OMAP1710,
1798 * when CACHE + DMA domain gets out of idle in DSP,
1799 * MMU interrupt occurs but DSPMMU_FAULT_ST is not set.
1800 * in this case, we just ignore the interrupt.
/* Masked fault: log which status bits (if any) were set, at debug
 * level only. */
1803 printk(KERN_DEBUG "%s%s%s%s\n",
1804 (status & DSPMMU_FAULT_ST_PREF)?
1805 " (prefetch err)" : "",
1806 (status & DSPMMU_FAULT_ST_PERM)?
1807 " (permission fault)" : "",
1808 (status & DSPMMU_FAULT_ST_TLB_MISS)?
1810 (status & DSPMMU_FAULT_ST_TRANS) ?
1811 " (translation fault)": "");
1813 "fault address = %s: 0x%06lx\n",
1814 dp ? "DATA" : "PROGRAM",
/* Non-masked fault: report each set status bit, parenthesized when the
 * bit is outside MMUFAULT_MASK (informational only). */
1820 printk(KERN_INFO "DSP MMU interrupt!\n");
1821 printk(KERN_INFO "%s%s%s%s\n",
1822 (status & DSPMMU_FAULT_ST_PREF)?
1823 (MMUFAULT_MASK & DSPMMU_FAULT_ST_PREF)?
1827 (status & DSPMMU_FAULT_ST_PERM)?
1828 (MMUFAULT_MASK & DSPMMU_FAULT_ST_PERM)?
1829 " permission fault":
1830 " (permission fault)":
1832 (status & DSPMMU_FAULT_ST_TLB_MISS)?
1833 (MMUFAULT_MASK & DSPMMU_FAULT_ST_TLB_MISS)?
1837 (status & DSPMMU_FAULT_ST_TRANS)?
1838 (MMUFAULT_MASK & DSPMMU_FAULT_ST_TRANS)?
1839 " translation fault":
1840 " (translation fault)":
1842 printk(KERN_INFO "fault address = %s: 0x%06lx\n",
1843 dp ? "DATA" : "PROGRAM",
/* DSP runtime up: only record the fault for the error handler;
 * calling dsp_exmap() from IRQ context crashes (see note below). */
1846 if (dsp_is_ready()) {
1848 * If we call dsp_exmap() here,
1849 * "kernel BUG at slab.c" occurs.
1852 dsp_err_mmu_set(dsp_fault_adr);
/* Otherwise (branch boundary elided -- presumably the else path):
 * reset the DSP with the MMU IRQ masked around the reset. */
1854 disable_irq(INT_DSP_MMU);
1856 printk(KERN_INFO "Resetting DSP...\n");
1857 dsp_cpustat_request(CPUSTAT_RESET);
1858 enable_irq(INT_DSP_MMU);
1860 * if we enable followings, semaphore lock should be avoided.
1862 printk(KERN_INFO "Flushing DSP MMU...\n");
/*
 * File operations for the DSP memory character device: seek/read/write
 * DSP memory, ioctl for exmap/kmem-pool control, mmap, and the
 * capability-checked open/release pair defined above.
 */
1874 struct file_operations dsp_mem_fops = {
1875 .owner = THIS_MODULE,
1876 .llseek = dsp_mem_lseek,
1877 .read = dsp_mem_read,
1878 .write = dsp_mem_write,
1879 .ioctl = dsp_mem_ioctl,
1880 .mmap = dsp_mem_mmap,
1881 .open = dsp_mem_open,
1882 .release = dsp_mem_release,
/*
 * dsp_mem_start(): register the internal-memory enable/disable
 * callbacks with the DSP common layer.
 */
1885 void dsp_mem_start(void)
1887 dsp_register_mem_cb(intmem_enable, intmem_disable);
/*
 * dsp_mem_stop(): clear the memory-sync bookkeeping and unregister the
 * callbacks installed by dsp_mem_start().
 */
1890 void dsp_mem_stop(void)
1892 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
1893 dsp_unregister_mem_cb();
/*
 * dsp_mem_init(): init-time setup for the DSP memory driver.
 * Invalidates every exmap table entry, allocates one DMA-capable page
 * for the DSP vector table (failing the init on allocation failure),
 * configures the idle-boot base, and creates the sysfs files (mmu,
 * exmap, kmem_pool) on the dsp platform device.  Return statements are
 * elided from this listing.
 */
1896 int __init dsp_mem_init(void)
1900 for (i = 0; i < DSPMMU_TLB_LINES; i++) {
1901 exmap_tbl[i].valid = 0;
/* One order-0 DMA-able page for the DSP vector table. */
1904 dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
1905 if (dspvect_page == NULL) {
1907 "omapdsp: failed to allocate memory "
1908 "for dsp vector table\n");
1912 dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);
1914 device_create_file(&dsp_device.dev, &dev_attr_mmu);
1915 device_create_file(&dsp_device.dev, &dev_attr_exmap);
1916 device_create_file(&dsp_device.dev, &dev_attr_kmem_pool);
1921 void dsp_mem_exit(void)
1926 if (dspvect_page != NULL) {
1929 down_read(&exmap_sem);
1931 virt = (unsigned long)dspbyte_to_virt(DSP_INIT_PAGE);
1932 flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
1933 free_page((unsigned long)dspvect_page);
1934 dspvect_page = NULL;
1936 up_read(&exmap_sem);
1939 device_remove_file(&dsp_device.dev, &dev_attr_mmu);
1940 device_remove_file(&dsp_device.dev, &dev_attr_exmap);
1941 device_remove_file(&dsp_device.dev, &dev_attr_kmem_pool);