2 * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
4 * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
6 * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
8 * Conversion to mempool API and ARM MMU section mapping
9 * by Paul Mundt <paul.mundt@nokia.com>
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27 #include <linux/module.h>
28 #include <linux/init.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/mempool.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <asm/uaccess.h>
39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h>
41 #include <asm/arch/tc.h>
42 #include <asm/arch/omapfb.h>
43 #include <asm/arch/mailbox.h>
44 #include <asm/arch/dsp_common.h>
45 #include "uaccess_dsp.h"
46 #include "dsp_mbcmd.h"
47 #include "../mailbox_hw.h"
/*
 * Size constants, alignment helper and page-order macros used for DSP
 * memory mapping.  ORDER_* convert a mapping size (4KB/64KB/1MB) into a
 * page-allocation order relative to PAGE_SHIFT.
 * NOTE(review): this listing has gaps (inner line numbers are
 * non-contiguous); the #endif matching the #ifdef below is not visible.
 */
52 #ifdef CONFIG_ARCH_OMAP2
53 #define IOMAP_VAL 0x3f
58 #define SZ_64KB 0x10000
59 #define SZ_1MB 0x100000
60 #define SZ_16MB 0x1000000
/* true when adr is a multiple of align (align must be a power of two) */
61 #define is_aligned(adr,align) (!((adr)&((align)-1)))
62 #define ORDER_4KB (12 - PAGE_SHIFT)
63 #define ORDER_64KB (16 - PAGE_SHIFT)
64 #define ORDER_1MB (20 - PAGE_SHIFT)
/*
 * DSP MMU register access abstraction: OMAP1 uses 16-bit registers via
 * omap_readw/omap_writew, OMAP2 uses 32-bit registers via readl/writel.
 * The enable/disable/itack/reset/flush macros hide the per-SoC register
 * layout differences.
 * NOTE(review): several macro lines (the do{...}while(0) wrappers and
 * some register operands) are missing from this extraction.
 */
67 * absorb DSP MMU register size and location difference
69 #if defined(CONFIG_ARCH_OMAP1)
70 typedef u16 dsp_mmu_reg_t;
71 #define dsp_mmu_read_reg(a) omap_readw(a)
72 #define dsp_mmu_write_reg(v,a) omap_writew(v,a)
73 #elif defined(CONFIG_ARCH_OMAP2)
74 typedef u32 dsp_mmu_reg_t;
75 #define dsp_mmu_read_reg(a) readl(a)
76 #define dsp_mmu_write_reg(v,a) writel(v,a)
77 #define dsp_ipi_read_reg(a) readl(a)
78 #define dsp_ipi_write_reg(v,a) writel(v,a)
81 #if defined(CONFIG_ARCH_OMAP1)
/* OMAP1: enabling the MMU also asserts software reset release */
83 #define dsp_mmu_enable() \
85 dsp_mmu_write_reg(DSP_MMU_CNTL_MMU_EN | DSP_MMU_CNTL_RESET_SW, \
88 #define dsp_mmu_disable() \
90 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
/* acknowledge a pending MMU interrupt (OMAP1 only) */
92 #define __dsp_mmu_itack() \
94 dsp_mmu_write_reg(DSP_MMU_IT_ACK_IT_ACK, DSP_MMU_IT_ACK); \
97 #elif defined(CONFIG_ARCH_OMAP2)
99 #define dsp_mmu_enable() \
101 dsp_mmu_write_reg(DSP_MMU_CNTL_MMUENABLE, DSP_MMU_CNTL); \
103 #define dsp_mmu_disable() \
105 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
/* OMAP2: soft-reset via SYSCONFIG (read-modify-write) */
107 #define dsp_mmu_reset() \
109 dsp_mmu_write_reg(dsp_mmu_read_reg(DSP_MMU_SYSCONFIG) | \
110 DSP_MMU_SYSCONFIG_SOFTRESET, \
111 DSP_MMU_SYSCONFIG); \
114 #endif /* CONFIG_ARCH_OMAP2 */
/* flush a single TLB entry (selected by the current victim pointer) */
116 #define dsp_mmu_flush() \
118 dsp_mmu_write_reg(DSP_MMU_FLUSH_ENTRY_FLUSH_ENTRY, \
119 DSP_MMU_FLUSH_ENTRY); \
/* global flush of all non-protected TLB entries */
121 #define __dsp_mmu_gflush() \
123 dsp_mmu_write_reg(DSP_MMU_GFLUSH_GFLUSH, DSP_MMU_GFLUSH); \
/*
 * On OMAP1 the CAM fields live in the "low" CAM register; alias the
 * generic names to the *_L_* variants so the rest of the code can be
 * written once for both OMAP1 and OMAP2.
 */
127 * absorb register name difference
129 #ifdef CONFIG_ARCH_OMAP1
130 #define DSP_MMU_CAM_P DSP_MMU_CAM_L_P
131 #define DSP_MMU_CAM_V DSP_MMU_CAM_L_V
132 #define DSP_MMU_CAM_PAGESIZE_MASK DSP_MMU_CAM_L_PAGESIZE_MASK
133 #define DSP_MMU_CAM_PAGESIZE_1MB DSP_MMU_CAM_L_PAGESIZE_1MB
134 #define DSP_MMU_CAM_PAGESIZE_64KB DSP_MMU_CAM_L_PAGESIZE_64KB
135 #define DSP_MMU_CAM_PAGESIZE_4KB DSP_MMU_CAM_L_PAGESIZE_4KB
136 #define DSP_MMU_CAM_PAGESIZE_1KB DSP_MMU_CAM_L_PAGESIZE_1KB
137 #endif /* CONFIG_ARCH_OMAP1 */
/*
 * OMAP1 EMIF (external memory interface) arbitration priority fields in
 * OMAP_TC_OCPT1_PRIOR, one 4-bit-ish field per initiator (LB/DMA/DSP/MPU).
 * set_emiff_dma_prio() read-modify-writes only the DMA field.
 */
142 #ifdef CONFIG_ARCH_OMAP1
143 #define EMIF_PRIO_LB_MASK 0x0000f000
144 #define EMIF_PRIO_LB_SHIFT 12
145 #define EMIF_PRIO_DMA_MASK 0x00000f00
146 #define EMIF_PRIO_DMA_SHIFT 8
147 #define EMIF_PRIO_DSP_MASK 0x00000070
148 #define EMIF_PRIO_DSP_SHIFT 4
149 #define EMIF_PRIO_MPU_MASK 0x00000007
150 #define EMIF_PRIO_MPU_SHIFT 0
151 #define set_emiff_dma_prio(prio) \
153 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
154 ~EMIF_PRIO_DMA_MASK) | \
155 ((prio) << EMIF_PRIO_DMA_SHIFT), \
156 OMAP_TC_OCPT1_PRIOR); \
158 #endif /* CONFIG_ARCH_OMAP1 */
/*
 * One shadow-table entry per DSP MMU TLB line: records the kernel buffer
 * backing the mapping, the DSP-side virtual address, the mapping type and
 * a linked-list style grouping of entries that belong to one exmap call.
 * NOTE(review): some fields (e.g. 'order' and the link struct members)
 * are referenced elsewhere in this file but their declaration lines are
 * missing from this extraction.
 */
165 struct exmap_tbl_entry {
166 unsigned int valid:1;
167 unsigned int prsvd:1; /* preserved */
168 int usecount; /* reference count by mmap */
169 enum exmap_type_e type;
170 void *buf; /* virtual address of the buffer,
171 * i.e. 0xc0000000 - */
172 void *vadr; /* DSP shadow space,
173 * i.e. 0xe0000000 - 0xe0ffffff */
178 } link; /* grouping */
/*
 * Initializers for an exmap table entry; the *_4KB_PRESERVED variant is
 * used for fixed 4KB entries that must never be flushed.  link.next/prev
 * of -1 means "not part of a group chain".
 * NOTE(review): lines assigning valid/prsvd/buf/vadr are missing from
 * this extraction of both macros.
 */
181 #define INIT_EXMAP_TBL_ENTRY(ent,b,v,typ,od) \
187 (ent)->usecount = 0; \
188 (ent)->type = (typ); \
189 (ent)->order = (od); \
190 (ent)->link.next = -1; \
191 (ent)->link.prev = -1; \
194 #define INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(ent,b,v) \
200 (ent)->usecount = 0; \
201 (ent)->type = EXMAP_TYPE_MEM; \
203 (ent)->link.next = -1; \
204 (ent)->link.prev = -1; \
/*
 * Shadow state for the 32 DSP MMU TLB lines, protected by exmap_sem.
 * exmap_preserved_cnt counts the leading preserved (locked) entries.
 */
207 #define DSP_MMU_TLB_LINES 32
208 static struct exmap_tbl_entry exmap_tbl[DSP_MMU_TLB_LINES];
209 static int exmap_preserved_cnt;
210 static DECLARE_RWSEM(exmap_sem);
/* framebuffer-export bookkeeping (only with external LCDC framebuffer) */
212 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
213 static struct omapfb_notifier_block *omapfb_nb;
214 static int omapfb_ready;
/*
 * Per-SoC image of one TLB entry's CAM/RAM register pair.
 * NOTE(review): the OMAP1 members (cam_h/cam_l/ram_h/ram_l) and the
 * OMAP2 members (cam/ram), plus the struct that holds pgsz/prsvd/...,
 * are largely missing from this extraction — only fragments remain.
 */
217 struct cam_ram_regset {
218 #if defined(CONFIG_ARCH_OMAP1)
223 #elif defined(CONFIG_ARCH_OMAP2)
232 dsp_mmu_reg_t pgsz, prsvd, valid;
233 #if defined(CONFIG_ARCH_OMAP1)
235 #elif defined(CONFIG_ARCH_OMAP2)
236 dsp_mmu_reg_t endian, elsz, mixed;
/*
 * TLB-entry initializers.  OMAP1 entries carry an access-permission (ap)
 * field; OMAP2 entries carry endianness/element-size fields instead.
 * The *_PRESERVED variants mark the entry protected from global flush;
 * *_ES32_* uses 32-bit element size (used for I/O map pages).
 * cam_ram_valid() tests the V bit in the per-SoC CAM image.
 * NOTE(review): the va/pa assignment lines of these macros are missing
 * from this extraction.
 */
240 #if defined(CONFIG_ARCH_OMAP1)
241 #define INIT_TLB_ENTRY(ent,v,p,ps) \
245 (ent)->pgsz = (ps); \
247 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
249 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
253 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
254 (ent)->prsvd = DSP_MMU_CAM_P; \
255 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
257 #elif defined(CONFIG_ARCH_OMAP2)
258 #define INIT_TLB_ENTRY(ent,v,p,ps) \
262 (ent)->pgsz = (ps); \
264 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
265 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
268 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
272 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
273 (ent)->prsvd = DSP_MMU_CAM_P; \
274 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
275 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
278 #define INIT_TLB_ENTRY_4KB_ES32_PRESERVED(ent,v,p) \
282 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
283 (ent)->prsvd = DSP_MMU_CAM_P; \
284 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
285 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_32; \
290 #if defined(CONFIG_ARCH_OMAP1)
291 #define cam_ram_valid(cr) ((cr).cam_l & DSP_MMU_CAM_V)
292 #elif defined(CONFIG_ARCH_OMAP2)
293 #define cam_ram_valid(cr) ((cr).cam & DSP_MMU_CAM_V)
/*
 * Forward declarations, module state, and read-only sysfs attributes
 * (mmu/exmap/mempool show handlers).
 * NOTE(review): the tails of the three *_show prototypes are missing
 * from this extraction.
 */
301 static int dsp_exunmap(dsp_long_t dspadr);
303 static void *dspvect_page;
304 static u32 dsp_fault_adr;
305 static struct mem_sync_struct mem_sync;
307 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
309 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
311 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
314 static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
315 static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
316 static struct device_attribute dev_attr_mempool = __ATTR_RO(mempool);
/*
 * Pop an element directly from the pool's reserved elements (under the
 * pool spinlock) if any are available; otherwise fall back to the
 * regular mempool_alloc() path.  Reaches into mempool_t internals —
 * hence the "hope this goes to mm/mempool.c" remark.
 * NOTE(review): the 'flags' declaration and the return of 'element'
 * are missing from this extraction.
 */
319 * special mempool function:
320 * hope this goes to mm/mempool.c
322 static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
326 spin_lock_irqsave(&pool->lock, flags);
327 if (likely(pool->curr_nr)) {
328 void *element = pool->elements[--pool->curr_nr];
329 spin_unlock_irqrestore(&pool->lock, flags);
332 spin_unlock_irqrestore(&pool->lock, flags);
334 return mempool_alloc(pool, gfp_mask);
/*
 * Combine the high bits of 'adr' with the low bits of a reference
 * address 'ref' selected by 'mask' — used to line up a DSP address
 * offset with a physical address within a mapping granule.
 * NOTE(review): the mask parameter line and return statement are
 * missing from this extraction.
 */
337 static __inline__ unsigned long lineup_offset(unsigned long adr,
341 unsigned long newadr;
343 newadr = (adr & ~mask) | (ref & mask);
/*
 * Increment the ARM-side ad_arm counters in the shared DARAM/SARAM/SDRAM
 * sync areas, with DSP memory access enabled around the updates.
 * NOTE(review): the per-area NULL checks, error returns and closing
 * brace are missing from this extraction.
 */
349 int dsp_mem_sync_inc(void)
351 if (dsp_mem_enable((void *)dspmem_base) < 0)
354 mem_sync.DARAM->ad_arm++;
356 mem_sync.SARAM->ad_arm++;
358 mem_sync.SDRAM->ad_arm++;
359 dsp_mem_disable((void *)dspmem_base);
/*
 * Install a new mem_sync configuration (called from the mbox1 workqueue).
 * Validates that the DARAM/SARAM/SDRAM pointers actually point into the
 * corresponding memory types before copying the config into mem_sync.
 * NOTE(review): the sync==NULL handling, error return and closing brace
 * are missing from this extraction.
 */
364 * dsp_mem_sync_config() is called from mbox1 workqueue
366 int dsp_mem_sync_config(struct mem_sync_struct *sync)
368 size_t sync_seq_sz = sizeof(struct sync_seq);
370 #ifdef OLD_BINARY_SUPPORT
372 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
/* each pointer must reference a sync_seq within its expected memory type */
376 if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
377 (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
378 (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
380 "omapdsp: mem_sync address validation failure!\n"
381 " mem_sync.DARAM = 0x%p,\n"
382 " mem_sync.SARAM = 0x%p,\n"
383 " mem_sync.SDRAM = 0x%p,\n",
384 sync->DARAM, sync->SARAM, sync->SDRAM);
387 memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
/*
 * mempools of DMA-capable page blocks for 1MB and 64KB DSP mappings.
 * The pool element "private data" is abused to carry the allocation
 * order (cast to/from void *).
 */
391 static mempool_t *kmem_pool_1M;
392 static mempool_t *kmem_pool_64K;
/* mempool alloc callback: grab 2^order DMA pages */
394 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
396 return (void *)__get_dma_pages(gfp, (unsigned int)order);
/* mempool free callback: release the page block */
399 static void dsp_pool_free(void *buf, void *order)
401 free_pages((unsigned long)buf, (unsigned int)order);
/*
 * Destroy both pools.  NOTE(review): the NULL guards and the
 * kmem_pool_1M = NULL assignment are missing from this extraction.
 */
404 static void dsp_kmem_release(void)
407 mempool_destroy(kmem_pool_64K);
408 kmem_pool_64K = NULL;
412 mempool_destroy(kmem_pool_1M);
/*
 * Pre-reserve kernel memory for DSP external mappings.  The requested
 * size (64KB-aligned, bounded by DSPSPACE_SIZE) is split into 1MB and
 * 64KB pool elements; existing pools are grown with mempool_resize().
 * NOTE(review): the 'nr' computations, mempool_create() argument lines
 * and the return statement are missing from this extraction.
 */
417 static int dsp_kmem_reserve(unsigned long size)
419 unsigned long len = size;
421 /* alignment check */
422 if (!is_aligned(size, SZ_64KB)) {
424 "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
428 if (size > DSPSPACE_SIZE) {
430 "omapdsp: size(0x%lx) is larger than DSP memory space "
431 "size (0x%x.\n", size, DSPSPACE_SIZE);
435 if (size >= SZ_1MB) {
438 if (likely(!kmem_pool_1M))
439 kmem_pool_1M = mempool_create(nr,
444 mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
/* strip the whole-megabyte portion that is now pooled */
447 size &= ~(0xf << 20);
450 if (size >= SZ_64KB) {
453 if (likely(!kmem_pool_64K))
454 kmem_pool_64K = mempool_create(nr,
459 mempool_resize(kmem_pool_64K,
460 kmem_pool_64K->min_nr + nr, GFP_KERNEL);
462 size &= ~(0xf << 16);
/*
 * Free a DSP mapping buffer: clear the PG_reserved flag on each page
 * (set at map time for mmap), then return the block to the matching
 * mempool when one exists, otherwise free the pages directly.
 * NOTE(review): the else/brace lines around the final free_pages() are
 * missing from this extraction.
 */
471 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
473 struct page *page, *ps, *pe;
475 ps = virt_to_page(buf);
476 pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
478 for (page = ps; page < pe; page++)
479 ClearPageReserved(page);
481 if ((order == ORDER_64KB) && likely(kmem_pool_64K))
482 mempool_free((void *)buf, kmem_pool_64K);
483 else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
484 mempool_free((void *)buf, kmem_pool_1M);
486 free_pages(buf, order);
/*
 * Map one page at kernel virtual address 'virt' to physical 'phys' in
 * the init_mm page tables, allocating a PTE page for the PMD if needed.
 * NOTE(review): the return-type line, local declarations (pgd/pud/pmd/
 * pte), allocation-failure handling and return are missing from this
 * extraction.
 */
490 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
497 pgd = pgd_offset_k(virt);
498 pud = pud_offset(pgd, virt);
499 pmd = pmd_offset(pud, virt);
501 if (pmd_none(*pmd)) {
502 pte = pte_alloc_one_kernel(&init_mm, 0);
506 /* note: two PMDs will be set */
507 pmd_populate_kernel(&init_mm, pmd, pte);
510 pte = pte_offset_kernel(pmd, virt);
511 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
/*
 * Map a 1MB ARM section at 'virt' -> 'phys' by writing the PMD entry
 * directly.  The (virt & (1 << 20)) test distinguishes which half of
 * the two-entry PMD pair is being set.
 * NOTE(review): return type, local declarations, the pmd adjustment in
 * the odd-megabyte branch, the pmd_none() bail-out and return are
 * missing from this extraction.
 */
516 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
522 pgd = pgd_offset_k(virt);
523 pud = pud_alloc(&init_mm, pgd, virt);
524 pmd = pmd_alloc(&init_mm, pud, virt);
526 if (virt & (1 << 20))
530 /* No good, fall back on smaller mappings. */
533 *pmd = __pmd(phys | prot);
534 flush_pmd_entry(pmd);
/*
 * Create an uncached ARM-side mapping for a DSP shadow region: page-level
 * PTE mappings up to the first 1MB-aligned boundary, then section
 * mappings for the bulk (currently disabled per the "Not yet" comment),
 * then page mappings for the remainder.  'off' is the virt->phys delta.
 * NOTE(review): the phys parameter, 'off' computation, loop-increment
 * lines and return are missing from this extraction.
 */
543 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
551 "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
/* page mappings: present/young/dirty/writable, uncached by default */
554 prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
555 L_PTE_DIRTY | L_PTE_WRITE);
/* section mappings: uncached device memory in the I/O domain */
557 prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
558 PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
560 if (cpu_architecture() <= CPU_ARCH_ARMv5)
561 prot_sect |= PMD_BIT4;
/* map leading sub-1MB portion page by page */
565 while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
566 exmap_alloc_pte(virt, virt + off, prot_pte);
572 /* XXX: Not yet.. confuses dspfb -- PFM. */
574 while (size >= (PGDIR_SIZE / 2)) {
575 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
578 virt += (PGDIR_SIZE / 2);
579 size -= (PGDIR_SIZE / 2);
/* map the trailing portion page by page */
583 while (size >= PAGE_SIZE) {
584 exmap_alloc_pte(virt, virt + off, prot_pte);
596 * A process can have old mappings. if we want to clear a pmd,
597 * we need to do it for all processes that use the old mapping.
/*
 * Clear every PTE in [addr, end) under one PMD of init_mm.
 * NOTE(review): the return-type line, pte declaration, the pte_none()
 * skip inside the loop, and pte_unmap() are missing from this
 * extraction.
 */
601 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
605 pte = pte_offset_map(pmd, addr);
610 pte_clear(&init_mm, addr, pte);
611 } while (pte++, addr += PAGE_SIZE, addr != end);
/*
 * Walk the PMDs under one PUD in [addr, end): section mappings are
 * cleared directly (pmd_clear + clean_pmd_entry), page-table-backed
 * PMDs descend into exmap_clear_pte_range().  The (addr & (1 << 20))
 * test selects the odd half of the ARM two-entry PMD pair.
 * NOTE(review): return type, local declarations, do{ line, the pmd
 * adjustment and pmd_clear() line are missing from this extraction.
 */
617 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
622 pmd = pmd_offset(pud, addr);
624 next = pmd_addr_end(addr, end);
626 if (addr & (1 << 20))
629 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
631 clean_pmd_entry(pmd);
635 if (pmd_none_or_clear_bad(pmd))
638 exmap_clear_pte_range(pmd, addr, next);
639 } while (pmd++, addr = next, addr != end);
/*
 * Walk the PUDs under one PGD in [addr, end), descending into
 * exmap_clear_pmd_range() for each present PUD.
 * NOTE(review): return type, local declarations, the do{ line and the
 * 'continue' after the none/bad check are missing from this extraction.
 */
643 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
648 pud = pud_offset(pgd, addr);
650 next = pud_addr_end(addr, end);
651 if (pud_none_or_clear_bad(pud))
654 exmap_clear_pmd_range(pud, addr, next);
655 } while (pud++, addr = next, addr != end);
/*
 * Tear down the ARM-side mapping for [virt, virt+size).
 * NOTE(review): this span appears to contain TWO alternative
 * implementations (a pgd-walk version and a page-by-page version)
 * whose separating #ifdef/#else/#endif lines are missing from this
 * extraction — the second printk/loop from inner line 685 onward
 * belongs to the other variant.  Verify against the original source.
 */
659 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
662 unsigned long next, end;
666 "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
669 pgd = pgd_offset_k(virt);
672 next = pgd_addr_end(virt, end);
673 if (pgd_none_or_clear_bad(pgd))
676 exmap_clear_pud_range(pgd, virt, next);
677 } while (pgd++, virt = next, virt != end);
685 "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
688 while (size >= PAGE_SIZE) {
689 pgd = pgd_offset_k(virt);
690 pud = pud_offset(pgd, virt);
691 pmd = pmd_offset(pud, virt);
692 pte = pte_offset_kernel(pmd, virt);
694 pte_clear(&init_mm, virt, pte);
/*
 * Check whether [vadr, vadr+len) is fully covered by valid exmap
 * entries; partially-covering entries advance vadr/len and the scan
 * restarts, so a range spanning several entries still validates.
 * Caller must hold exmap_sem (see inline comment).
 * NOTE(review): local declarations, the 'start_from' style restart
 * label/goto lines, the valid-entry skip and the return statements are
 * missing from this extraction.
 */
703 static int exmap_valid(void *vadr, size_t len)
705 /* exmap_sem should be held before calling this function */
709 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
711 unsigned long mapsize;
712 struct exmap_tbl_entry *ent = &exmap_tbl[i];
716 mapadr = (void *)ent->vadr;
717 mapsize = 1 << (ent->order + PAGE_SHIFT);
718 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
719 if (vadr + len <= mapadr + mapsize) {
720 /* this map covers whole address. */
724 * this map covers partially.
725 * check rest portion.
727 len -= mapadr + mapsize - vadr;
728 vadr = mapadr + mapsize;
/*
 * Classify [vadr, vadr+len): DARAM, SARAM, externally-mapped memory
 * (validated via exmap_valid() under exmap_sem), a range crossing
 * memory types, or none.
 * NOTE(review): local 'ret' init, the crossing-boundary conditions, the
 * up_read() and final return are missing from this extraction.
 */
737 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
739 void *ds = (void *)daram_base;
740 void *de = (void *)daram_base + daram_size;
741 void *ss = (void *)saram_base;
742 void *se = (void *)saram_base + saram_size;
745 if ((vadr >= ds) && (vadr < de)) {
747 return MEM_TYPE_CROSSING;
749 return MEM_TYPE_DARAM;
750 } else if ((vadr >= ss) && (vadr < se)) {
752 return MEM_TYPE_CROSSING;
754 return MEM_TYPE_SARAM;
756 down_read(&exmap_sem);
757 if (exmap_valid(vadr, len))
758 ret = MEM_TYPE_EXTERN;
/*
 * Validate that (p, len) lies in a known DSP memory type; on failure,
 * format the caller-supplied printf-style description into a buffer and
 * log a detailed error.
 * NOTE(review): the va_list handling around vsprintf, the buffer
 * declaration, the error return and success return are missing from
 * this extraction.
 */
766 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
768 if (dsp_mem_type(p, len) <= 0) {
774 vsprintf(s, fmt, args);
777 "omapdsp: %s address(0x%p) and size(0x%x) is "
779 " (crossing different type of memories, or \n"
780 " external memory space where no "
781 "actual memory is mapped)\n",
791 * exmap_use(), unuse():
792 * when the mapped area is exported to user space with mmap,
793 * the usecount is incremented.
794 * while the usecount > 0, that area can't be released.
/*
 * Bump the usecount of every valid exmap entry overlapping
 * [vadr, vadr+len), under the write lock.
 * NOTE(review): the local declarations, valid-entry skip and the
 * usecount++ line are missing from this extraction.
 */
796 void exmap_use(void *vadr, size_t len)
800 down_write(&exmap_sem);
801 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
803 unsigned long mapsize;
804 struct exmap_tbl_entry *ent = &exmap_tbl[i];
808 mapadr = (void *)ent->vadr;
809 mapsize = 1 << (ent->order + PAGE_SHIFT);
810 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
813 up_write(&exmap_sem);
/*
 * Counterpart of exmap_use(): decrement the usecount of every valid
 * exmap entry overlapping [vadr, vadr+len), under the write lock.
 * NOTE(review): the local declarations, valid-entry skip and the
 * usecount-- line are missing from this extraction.
 */
816 void exmap_unuse(void *vadr, size_t len)
820 down_write(&exmap_sem);
821 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
823 unsigned long mapsize;
824 struct exmap_tbl_entry *ent = &exmap_tbl[i];
828 mapadr = (void *)ent->vadr;
829 mapsize = 1 << (ent->order + PAGE_SHIFT);
830 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
833 up_write(&exmap_sem);
838 * returns physical address, and sets len to valid length
/*
 * Translate a DSP shadow-space virtual address to physical.  Internal
 * DSP memory translates 1:1; external addresses are resolved through
 * the exmap table (buf's physical base plus the offset into the entry).
 * *len is clamped to the remaining bytes of the containing region.
 * NOTE(review): the loop-local declarations, valid-entry skip and the
 * not-found return are missing from this extraction.
 */
840 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
844 if (is_dsp_internal_mem(vadr)) {
846 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
847 return (unsigned long)vadr;
851 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
853 unsigned long mapsize;
854 struct exmap_tbl_entry *ent = &exmap_tbl[i];
858 mapadr = (void *)ent->vadr;
859 mapsize = 1 << (ent->order + PAGE_SHIFT);
860 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
861 *len = mapadr + mapsize - vadr;
862 return __pa(ent->buf) + vadr - mapadr;
866 /* valid mapping not found */
/*
 * Virtual-address tag masks per page size.  OMAP1 splits the VA tag
 * across CAM_H/CAM_L (get_cam_l_va_mask() returns the low part, and
 * get_cam_va_mask() reassembles the full 32-bit mask); OMAP2 uses a
 * flat per-page-size mask.
 * NOTE(review): the 'switch (pgsz)' line and default return of
 * get_cam_l_va_mask() are missing from this extraction.
 */
873 #ifdef CONFIG_ARCH_OMAP1
874 static dsp_mmu_reg_t get_cam_l_va_mask(dsp_mmu_reg_t pgsz)
877 case DSP_MMU_CAM_PAGESIZE_1MB:
878 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
879 DSP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
880 case DSP_MMU_CAM_PAGESIZE_64KB:
881 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
882 DSP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
883 case DSP_MMU_CAM_PAGESIZE_4KB:
884 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
885 DSP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
886 case DSP_MMU_CAM_PAGESIZE_1KB:
887 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
888 DSP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
892 #endif /* CONFIG_ARCH_OMAP1 */
894 #if defined(CONFIG_ARCH_OMAP1)
895 #define get_cam_va_mask(pgsz) \
896 ((u32)DSP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
897 (u32)get_cam_l_va_mask(pgsz) << 6)
898 #elif defined(CONFIG_ARCH_OMAP2)
899 #define get_cam_va_mask(pgsz) \
900 ((pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
901 (pgsz == DSP_MMU_CAM_PAGESIZE_1MB) ? 0xfff00000 : \
902 (pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
903 (pgsz == DSP_MMU_CAM_PAGESIZE_4KB) ? 0xfffff000 : 0)
904 #endif /* CONFIG_ARCH_OMAP2 */
/*
 * Read / write the DSP MMU LOCK register, which holds the locked-entry
 * base (entries below it are protected) and the victim pointer (next
 * entry to be replaced).
 * NOTE(review): braces and the target-register operand of the write in
 * set_tlb_lock() are missing from this extraction.
 */
906 static void get_tlb_lock(struct tlb_lock *tlb_lock)
908 dsp_mmu_reg_t lock = dsp_mmu_read_reg(DSP_MMU_LOCK);
910 tlb_lock->base = (lock & DSP_MMU_LOCK_BASE_MASK) >>
911 DSP_MMU_LOCK_BASE_SHIFT;
912 tlb_lock->victim = (lock & DSP_MMU_LOCK_VICTIM_MASK) >>
913 DSP_MMU_LOCK_VICTIM_SHIFT;
916 static void set_tlb_lock(struct tlb_lock *tlb_lock)
918 dsp_mmu_write_reg((tlb_lock->base << DSP_MMU_LOCK_BASE_SHIFT) |
919 (tlb_lock->victim << DSP_MMU_LOCK_VICTIM_SHIFT),
/*
 * Read the TLB entry selected by tlb_lock->victim into *cr.  OMAP1
 * requires an explicit LD_TLB "read" command before the READ_* registers
 * are valid; OMAP2 reads the CAM/RAM registers directly.
 */
923 static void __read_tlb(struct tlb_lock *tlb_lock, struct cam_ram_regset *cr)
926 set_tlb_lock(tlb_lock);
928 #if defined(CONFIG_ARCH_OMAP1)
929 /* read a TLB entry */
930 dsp_mmu_write_reg(DSP_MMU_LD_TLB_RD, DSP_MMU_LD_TLB);
932 cr->cam_h = dsp_mmu_read_reg(DSP_MMU_READ_CAM_H);
933 cr->cam_l = dsp_mmu_read_reg(DSP_MMU_READ_CAM_L);
934 cr->ram_h = dsp_mmu_read_reg(DSP_MMU_READ_RAM_H);
935 cr->ram_l = dsp_mmu_read_reg(DSP_MMU_READ_RAM_L);
936 #elif defined(CONFIG_ARCH_OMAP2)
937 cr->cam = dsp_mmu_read_reg(DSP_MMU_READ_CAM);
938 cr->ram = dsp_mmu_read_reg(DSP_MMU_READ_RAM);
/*
 * Write *cr into the TLB entry at the current victim pointer: program
 * the CAM/RAM registers (OMAP2 forces the valid bit on), flush the old
 * entry, then issue the LD_TLB "load" command.
 * NOTE(review): the flush call following the "flush the entry" comment
 * is missing from this extraction.
 */
942 static void __load_tlb(struct cam_ram_regset *cr)
944 #if defined(CONFIG_ARCH_OMAP1)
945 dsp_mmu_write_reg(cr->cam_h, DSP_MMU_CAM_H);
946 dsp_mmu_write_reg(cr->cam_l, DSP_MMU_CAM_L);
947 dsp_mmu_write_reg(cr->ram_h, DSP_MMU_RAM_H);
948 dsp_mmu_write_reg(cr->ram_l, DSP_MMU_RAM_L);
949 #elif defined(CONFIG_ARCH_OMAP2)
950 dsp_mmu_write_reg(cr->cam | DSP_MMU_CAM_V, DSP_MMU_CAM);
951 dsp_mmu_write_reg(cr->ram, DSP_MMU_RAM);
954 /* flush the entry */
957 /* load a TLB entry */
958 dsp_mmu_write_reg(DSP_MMU_LD_TLB_LD, DSP_MMU_LD_TLB);
/*
 * Install a locked TLB entry for *tlb_ent: find the first invalid line
 * at or below the lock base (reusing a hole if one exists), build the
 * per-SoC CAM/RAM image, load it, and advance the lock base when a new
 * line was consumed.  OMAP1 brackets the register access with DSP clock
 * and memory-request handling.
 * NOTE(review): loop-increment lines, the __load_tlb() call, error
 * returns and the success return are missing from this extraction.
 */
961 static int dsp_mmu_load_tlb(struct tlb_entry *tlb_ent)
963 struct tlb_lock tlb_lock;
964 struct cam_ram_regset cr;
966 #ifdef CONFIG_ARCH_OMAP1
967 clk_enable(dsp_ck_handle)
968 omap_dsp_request_mem();
/* scan locked region for an invalid (reusable) line */
971 get_tlb_lock(&tlb_lock);
972 for (tlb_lock.victim = 0;
973 tlb_lock.victim < tlb_lock.base;
975 struct cam_ram_regset tmp_cr;
977 /* read a TLB entry */
978 __read_tlb(&tlb_lock, &tmp_cr);
979 if (!cam_ram_valid(tmp_cr))
982 set_tlb_lock(&tlb_lock);
985 /* The last (31st) entry cannot be locked? */
986 if (tlb_lock.victim == 31) {
987 printk(KERN_ERR "omapdsp: TLB is full.\n");
/* the VA must be aligned to the page-size boundary */
991 if (tlb_ent->va & ~get_cam_va_mask(tlb_ent->pgsz)) {
993 "omapdsp: mapping vadr (0x%06x) is not "
994 "aligned boundary\n", tlb_ent->va);
998 #if defined(CONFIG_ARCH_OMAP1)
999 cr.cam_h = tlb_ent->va >> 22;
1000 cr.cam_l = (tlb_ent->va >> 6 & get_cam_l_va_mask(tlb_ent->pgsz)) |
1001 tlb_ent->prsvd | tlb_ent->pgsz;
1002 cr.ram_h = tlb_ent->pa >> 16;
1003 cr.ram_l = (tlb_ent->pa & DSP_MMU_RAM_L_RAM_LSB_MASK) | tlb_ent->ap;
1004 #elif defined(CONFIG_ARCH_OMAP2)
1005 cr.cam = (tlb_ent->va & DSP_MMU_CAM_VATAG_MASK) |
1006 tlb_ent->prsvd | tlb_ent->pgsz;
1007 cr.ram = tlb_ent->pa | tlb_ent->endian | tlb_ent->elsz;
1011 /* update lock base */
1012 if (tlb_lock.victim == tlb_lock.base)
1014 tlb_lock.victim = tlb_lock.base;
1015 set_tlb_lock(&tlb_lock);
1017 #ifdef CONFIG_ARCH_OMAP1
1018 omap_dsp_release_mem();
1019 clk_disable(dsp_ck_handle);
/*
 * Invalidate any TLB entry whose VA tag matches 'vadr': scan the locked
 * region, flush matching entries, track the highest remaining valid
 * line, and shrink the lock base/victim to just above it.
 * NOTE(review): local declarations (i, max_valid, ...), the address
 * match test, the flush call and the return are missing from this
 * extraction.
 */
1024 static int dsp_mmu_clear_tlb(dsp_long_t vadr)
1026 struct tlb_lock tlb_lock;
1030 #ifdef CONFIG_ARCH_OMAP1
1031 clk_enable(dsp_ck_handle);
1032 omap_dsp_request_mem();
1035 get_tlb_lock(&tlb_lock);
1036 for (i = 0; i < tlb_lock.base; i++) {
1037 struct cam_ram_regset cr;
1041 /* read a TLB entry */
1042 tlb_lock.victim = i;
1043 __read_tlb(&tlb_lock, &cr);
1044 if (!cam_ram_valid(cr))
/* reconstruct the entry's VA tag per SoC */
1047 #if defined(CONFIG_ARCH_OMAP1)
1048 pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
1049 cam_va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
1050 (u32)(cr.cam_l & get_cam_l_va_mask(pgsz)) << 6;
1051 #elif defined(CONFIG_ARCH_OMAP2)
1052 pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
1053 cam_va = cr.cam & get_cam_va_mask(pgsz);
1057 /* flush the entry */
1063 /* set new lock base */
1064 tlb_lock.base = max_valid + 1;
1065 tlb_lock.victim = max_valid + 1;
1066 set_tlb_lock(&tlb_lock);
1068 #ifdef CONFIG_ARCH_OMAP1
1069 omap_dsp_release_mem();
1070 clk_disable(dsp_ck_handle);
/*
 * Globally flush all non-preserved TLB entries and reset the lock
 * base/victim to just above the preserved entries.
 * NOTE(review): the __dsp_mmu_gflush() invocation line is missing from
 * this extraction.
 */
1075 static void dsp_mmu_gflush(void)
1077 struct tlb_lock tlb_lock;
1079 #ifdef CONFIG_ARCH_OMAP1
1080 clk_enable(dsp_ck_handle);
1081 omap_dsp_request_mem();
1085 tlb_lock.base = exmap_preserved_cnt;
1086 tlb_lock.victim = exmap_preserved_cnt;
1087 set_tlb_lock(&tlb_lock);
1089 #ifdef CONFIG_ARCH_OMAP1
1090 omap_dsp_release_mem();
1091 clk_disable(dsp_ck_handle);
1098 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
1099 * In this case, the buffer for DSP is allocated in this routine,
1100 * then it is mapped.
1101 * On the other hand, for example - frame buffer sharing, calls
1102 * this function with padr set. It means some known address space
1103 * pointed with padr is going to be shared with DSP.
/*
 * Map [dspadr, dspadr+size) for the DSP, splitting the request into the
 * largest page-size units (1MB / 64KB / 4KB) that alignment allows.
 * For each unit: allocate (or use padr) a buffer, map it into the ARM
 * page tables uncached, load a DSP TLB entry, and chain the exmap
 * entries via link.next/prev so dsp_exunmap() can walk the group.
 * NOTE(review): many lines are missing from this extraction — the
 * 'unit' computations, loop continuation, error paths and final
 * return(s) among them.
 */
1105 static int dsp_exmap(dsp_long_t dspadr, unsigned long padr, unsigned long size,
1106 enum exmap_type_e type)
1110 unsigned int order = 0;
1113 dsp_long_t _dspadr = dspadr;
1114 unsigned long _padr = padr;
1115 void *_vadr = dspbyte_to_virt(dspadr);
1116 unsigned long _size = size;
1117 struct tlb_entry tlb_ent;
1118 struct exmap_tbl_entry *exmap_ent;
1123 #define MINIMUM_PAGESZ SZ_4KB
/* all of size / dspadr / padr must be 4KB-aligned */
1127 if (!is_aligned(size, MINIMUM_PAGESZ)) {
1129 "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
1132 if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
1134 "omapdsp: DSP address(0x%x) is not aligned.\n", dspadr);
1137 if (!is_aligned(padr, MINIMUM_PAGESZ)) {
1139 "omapdsp: physical address(0x%lx) is not aligned.\n",
1144 /* address validity check */
1145 if ((dspadr < dspmem_size) ||
1146 (dspadr >= DSPSPACE_SIZE) ||
1147 ((dspadr + size > DSP_INIT_PAGE) &&
1148 (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
1150 "omapdsp: illegal address/size for dsp_exmap().\n");
1154 down_write(&exmap_sem);
/* reject requests overlapping an existing mapping */
1157 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1158 unsigned long mapsize;
1159 struct exmap_tbl_entry *tmp_ent = &exmap_tbl[i];
1161 if (!tmp_ent->valid)
1163 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
1164 if ((_vadr + size > tmp_ent->vadr) &&
1165 (_vadr < tmp_ent->vadr + mapsize)) {
1166 printk(KERN_ERR "omapdsp: exmap page overlap!\n");
1167 up_write(&exmap_sem);
1174 /* Are there any free TLB lines? */
1175 for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1176 if (!exmap_tbl[idx].valid)
1179 printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
1184 exmap_ent = &exmap_tbl[idx];
/* pick the largest unit that remaining size and alignment permit */
1188 * 1KB mapping in OMAP1,
1189 * 16MB mapping in OMAP2.
1191 if ((_size >= SZ_1MB) &&
1192 (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
1193 is_aligned(_dspadr, SZ_1MB)) {
1195 pgsz = DSP_MMU_CAM_PAGESIZE_1MB;
1196 } else if ((_size >= SZ_64KB) &&
1197 (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
1198 is_aligned(_dspadr, SZ_64KB)) {
1200 pgsz = DSP_MMU_CAM_PAGESIZE_64KB;
1203 pgsz = DSP_MMU_CAM_PAGESIZE_4KB;
1206 order = get_order(unit);
1208 /* buffer allocation */
1209 if (type == EXMAP_TYPE_MEM) {
1210 struct page *page, *ps, *pe;
1212 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
1213 buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
1214 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
1215 buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
1217 buf = (void *)__get_dma_pages(GFP_KERNEL, order);
1224 /* mark the pages as reserved; this is needed for mmap */
1225 ps = virt_to_page(buf);
1226 pe = virt_to_page(buf + unit);
1228 for (page = ps; page < pe; page++)
1229 SetPageReserved(page);
1235 * mapping for ARM MMU:
1236 * we should not access to the allocated memory through 'buf'
1237 * since this area should not be cached.
1239 status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
1243 /* loading DSP TLB entry */
1244 INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
1245 status = dsp_mmu_load_tlb(&tlb_ent);
1247 exmap_clear_armmmu((unsigned long)_vadr, unit);
1251 INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
1252 exmap_ent->link.prev = prev;
1254 exmap_tbl[prev].link.next = idx;
1256 if ((_size -= unit) == 0) { /* normal completion */
1257 up_write(&exmap_sem);
1263 _padr = padr ? _padr + unit : 0;
/* error unwind: unmap everything mapped so far */
1268 up_write(&exmap_sem);
1270 dsp_mem_free_pages((unsigned long)buf, order);
1271 dsp_exunmap(dspadr);
/*
 * Tear down the ARM side of one exmap entry: clear the ARM page tables,
 * free the backing pages for MEM-type entries, and unregister the
 * framebuffer notifier for FB-type entries.  Returns the entry's size.
 * NOTE(review): the printk level/brace lines are missing from this
 * extraction, and the string "omapfb_runegister_client" at inner line
 * 1299 looks like a typo for "omapfb_unregister_client" — runtime
 * string, left unchanged here.
 */
1275 static unsigned long unmap_free_arm(struct exmap_tbl_entry *ent)
1279 /* clearing ARM MMU */
1280 size = 1 << (ent->order + PAGE_SHIFT);
1281 exmap_clear_armmmu((unsigned long)ent->vadr, size);
1283 /* freeing allocated memory */
1284 if (ent->type == EXMAP_TYPE_MEM) {
1285 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1287 "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1290 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1291 else if (ent->type == EXMAP_TYPE_FB) {
1294 status = omapfb_unregister_client(omapfb_nb);
1296 printk("omapfb_unregister_client(): "
1299 printk("omapfb_runegister_client(): "
1300 "failure(%d)\n", status);
/*
 * Unmap the exmap group starting at 'dspadr': locate the (non-preserved)
 * head entry, refuse if any part is still mmap'ed (usecount > 0), clear
 * the DSP TLB entry, then walk link.next to free each chained entry's
 * ARM mapping and buffer, flushing the kernel TLB range afterwards.
 * NOTE(review): the found_map label, entry invalidation, loop-back
 * lines and return statements are missing from this extraction.
 */
1311 static int dsp_exunmap(dsp_long_t dspadr)
1316 struct exmap_tbl_entry *ent;
1319 vadr = dspbyte_to_virt(dspadr);
1320 down_write(&exmap_sem);
1321 for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1322 ent = &exmap_tbl[idx];
1323 if ((!ent->valid) || ent->prsvd)
1325 if (ent->vadr == vadr)
1328 up_write(&exmap_sem);
1330 "omapdsp: address %06x not found in exmap_tbl.\n", dspadr);
/* still referenced from user space via mmap: refuse to unmap */
1334 if (ent->usecount > 0) {
1336 "omapdsp: exmap reference count is not 0.\n"
1337 " idx=%d, vadr=%p, order=%d, usecount=%d\n",
1338 idx, ent->vadr, ent->order, ent->usecount);
1339 up_write(&exmap_sem);
1342 /* clearing DSP TLB entry */
1343 dsp_mmu_clear_tlb(dspadr);
1345 /* clear ARM MMU and free buffer */
1346 size = unmap_free_arm(ent);
1350 /* we don't free PTEs */
1353 flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
/* follow the group chain to the next entry of this mapping */
1355 if ((idx = ent->link.next) < 0)
1356 goto up_out; /* normal completion */
1357 ent = &exmap_tbl[idx];
1360 if (ent->vadr == vadr)
1361 goto found_map; /* continue */
1364 "omapdsp: illegal exmap_tbl grouping!\n"
1365 "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1366 vadr, idx, ent->vadr);
1367 up_write(&exmap_sem);
1371 up_write(&exmap_sem);
/*
 * Drop every non-preserved exmap mapping at once: global DSP TLB flush
 * (the gflush call line is missing from this extraction), free each
 * valid non-preserved entry's ARM mapping and buffer, then flush the
 * kernel TLB over the whole external DSP space.
 */
1375 static void exmap_flush(void)
1377 struct exmap_tbl_entry *ent;
1380 down_write(&exmap_sem);
1382 /* clearing DSP TLB entry */
1385 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1386 ent = &exmap_tbl[i];
1387 if (ent->valid && (!ent->prsvd)) {
1388 unmap_free_arm(ent);
1394 flush_tlb_kernel_range(dspmem_base + dspmem_size,
1395 dspmem_base + DSPSPACE_SIZE);
1396 up_write(&exmap_sem);
1399 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1401 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1402 #endif /* CONFIG_FB */
/*
 * Framebuffer event notifier: tracks whether the external-LCDC
 * framebuffer is usable by setting/clearing the omapfb_ready flag
 * (the assignments themselves are missing from this extraction).
 */
1404 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1405 static int omapfb_notifier_cb(struct notifier_block *omapfb_nb,
1406 unsigned long event, void *fbi)
1409 printk("omapfb_notifier_cb(): event = %s\n",
1410 (event == OMAPFB_EVENT_READY) ? "READY" :
1411 (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1412 if (event == OMAPFB_EVENT_READY)
1414 else if (event == OMAPFB_EVENT_DISABLED)
/*
 * Export the first registered framebuffer to the DSP: take its physical
 * base/size, round both out to 4KB, line the DSP address's low bits up
 * with the physical address (per 1MB/64KB granule) so large TLB pages
 * can be used, map with dsp_exmap(EXMAP_TYPE_FB), warn loudly if the
 * rounding exposed extra memory, and register the omapfb notifier.
 * A stub that just logs an error is compiled when OMAP_DSP_FBEXPORT is
 * disabled.  NOTE(review): the #else/#endif lines separating the two
 * variants and various returns are missing from this extraction.
 */
1420 static int dsp_fbexport(dsp_long_t *dspadr)
1422 dsp_long_t dspadr_actual;
1423 unsigned long padr_sys, padr, fbsz_sys, fbsz;
1425 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1429 printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1431 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1434 "omapdsp: frame buffer has been exported already!\n");
1439 if (num_registered_fb == 0) {
1440 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1443 if (num_registered_fb != 1) {
1445 "omapdsp: %d frame buffers found. we use first one.\n",
1448 padr_sys = registered_fb[0]->fix.smem_start;
1449 fbsz_sys = registered_fb[0]->fix.smem_len;
1450 if (fbsz_sys == 0) {
1452 "omapdsp: framebuffer doesn't seem to be configured "
1453 "correctly! (size=0)\n");
1458 * align padr and fbsz to 4kB boundary
1459 * (should be noted to the user afterwards!)
1461 padr = padr_sys & ~(SZ_4KB-1);
1462 fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1464 /* line up dspadr offset with padr */
1466 (fbsz > SZ_1MB) ? lineup_offset(*dspadr, padr, SZ_1MB-1) :
1467 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1468 /* (fbsz > SZ_4KB) ? */ *dspadr;
1469 if (dspadr_actual != *dspadr)
1471 "omapdsp: actual dspadr for FBEXPORT = %08x\n",
1473 *dspadr = dspadr_actual;
1475 cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1477 printk(KERN_ERR "omapdsp: exmap failure.\n");
1481 if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1483 " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1484 " !! screen base address or size is not aligned in 4kB: !!\n"
1485 " !! actual screen adr = %08lx, size = %08lx !!\n"
1486 " !! exporting adr = %08lx, size = %08lx !!\n"
1487 " !! Make sure that the framebuffer is allocated with 4kB-order! !!\n"
1488 " !! Otherwise DSP can corrupt the kernel memory. !!\n"
1489 " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1490 padr_sys, fbsz_sys, padr, fbsz);
1493 #ifdef CONFIG_ARCH_OMAP1
1494 /* increase the DMA priority */
1495 set_emiff_dma_prio(15);
1498 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1499 omapfb_nb = kmalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1500 if (omapfb_nb == NULL) {
1502 "omapdsp: failed to allocate memory for omapfb_nb!\n");
1503 dsp_exunmap(dspadr_actual);
1506 status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1508 printk("omapfb_register_client(): success\n");
1510 printk("omapfb_register_client(): failure(%d)\n", status);
1516 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1518 static int dsp_fbexport(dsp_long_t *dspadr)
1520 printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1524 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
/*
 * Install one preserved 4KB memory page for the DSP: map it in the ARM
 * page tables, record it in exmap_tbl[exmap_idx] as a preserved entry,
 * and load a preserved DSP TLB entry.
 * NOTE(review): the phys/exmap_idx parameter lines and the phys
 * computation are missing from this extraction.
 */
1526 static void exmap_setup_preserved_mem_page(void *buf, dsp_long_t dspadr,
1531 struct tlb_entry tlb_ent;
1534 virt = dspbyte_to_virt(dspadr);
1535 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1536 INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], buf, virt);
1537 INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1538 dsp_mmu_load_tlb(&tlb_ent);
/*
 * ARM-side teardown of a preserved memory page; the DSP MMU side is
 * intentionally left alone (the MMU is being shut down — see comment).
 */
1541 static void exmap_clear_mem_page(dsp_long_t dspadr)
1545 virt = dspbyte_to_virt(dspadr);
1546 exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1547 /* DSP MMU is shutting down. not handled here. */
1550 #ifdef CONFIG_ARCH_OMAP2
/*
 * OMAP2 only: map an I/O region into the DSP address space as a
 * preserved 4KB page.  The DSP byte address is derived from the DSP I/O
 * word address via the IOMAP_VAL window (see IOMAP_VAL at top of file).
 */
1551 static void exmap_setup_iomap_page(unsigned long phys, unsigned long dsp_io_adr,
1556 struct tlb_entry tlb_ent;
/* dsp_io_adr is a word address; << 1 converts to a byte offset. */
1558 dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1559 virt = dspbyte_to_virt(dspadr);
1560 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
/* NULL buf: no backing kernel buffer — this maps device registers. */
1561 INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], NULL, virt);
1562 INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
1563 dsp_mmu_load_tlb(&tlb_ent);
/*
 * OMAP2 only: undo the ARM-side mapping made by exmap_setup_iomap_page()
 * for the given DSP I/O word address.
 */
1566 static void exmap_clear_iomap_page(unsigned long dsp_io_adr)
/* Same address derivation as in exmap_setup_iomap_page(). */
1571 dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1572 virt = dspbyte_to_virt(dspadr);
1573 exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1574 /* DSP MMU is shutting down. not handled here. */
1576 #endif /* CONFIG_ARCH_OMAP2 */
/* Physical base addresses of peripherals exported to the DSP (OMAP24xx). */
1578 #define OMAP2420_GPT5_BASE (L4_24XX_BASE + 0x7c000)
1579 #define OMAP2420_GPT6_BASE (L4_24XX_BASE + 0x7e000)
1580 #define OMAP2420_GPT7_BASE (L4_24XX_BASE + 0x80000)
1581 #define OMAP2420_GPT8_BASE (L4_24XX_BASE + 0x82000)
1582 #define OMAP24XX_EAC_BASE (L4_24XX_BASE + 0x90000)
/*
 * Install all preserved exmap entries: the DSP init/vector page, and on
 * OMAP2 the I/O windows (PRCM, GP timers, EAC, mailbox).  Presumably
 * returns the count n of entries created — return not visible here.
 */
1584 static int exmap_setup_preserved_entries(void)
1588 exmap_setup_preserved_mem_page(dspvect_page, DSP_INIT_PAGE, n++);
1589 #ifdef CONFIG_ARCH_OMAP2
1590 exmap_setup_iomap_page(OMAP24XX_PRCM_BASE, 0x7000, n++);
1591 #ifdef CONFIG_ARCH_OMAP2420
1592 exmap_setup_iomap_page(OMAP2420_GPT5_BASE, 0xe000, n++);
1593 exmap_setup_iomap_page(OMAP2420_GPT6_BASE, 0xe800, n++);
1594 exmap_setup_iomap_page(OMAP2420_GPT7_BASE, 0xf000, n++);
1595 exmap_setup_iomap_page(OMAP2420_GPT8_BASE, 0xf800, n++);
1596 #endif /* CONFIG_ARCH_OMAP2420 */
1597 exmap_setup_iomap_page(OMAP24XX_EAC_BASE, 0x10000, n++);
1598 exmap_setup_iomap_page(OMAP24XX_MAILBOX_BASE, 0x11000, n++);
1599 #endif /* CONFIG_ARCH_OMAP2 */
/*
 * Mirror of exmap_setup_preserved_entries(): clear the ARM-side mappings
 * of every preserved entry.  The DSP I/O addresses here must stay in
 * sync with those used during setup.
 */
1604 static void exmap_clear_preserved_entries(void)
1606 exmap_clear_mem_page(DSP_INIT_PAGE);
1607 #ifdef CONFIG_ARCH_OMAP2
1608 exmap_clear_iomap_page(0x7000); /* PRCM */
1609 #ifdef CONFIG_ARCH_OMAP2420
1610 exmap_clear_iomap_page(0xe000); /* GPT5 */
1611 exmap_clear_iomap_page(0xe800); /* GPT6 */
1612 exmap_clear_iomap_page(0xf000); /* GPT7 */
1613 exmap_clear_iomap_page(0xf800); /* GPT8 */
1614 #endif /* CONFIG_ARCH_OMAP2420 */
1615 exmap_clear_iomap_page(0x10000); /* EAC */
1616 exmap_clear_iomap_page(0x11000); /* MAILBOX */
1617 #endif /* CONFIG_ARCH_OMAP2 */
1620 #ifdef CONFIG_ARCH_OMAP1
/*
 * Acknowledge a DSP MMU fault (OMAP1 only): temporarily map a dummy page
 * at the faulting address so the DSP can proceed, drop to the recovery
 * runlevel, then unmap and clear the error flag.  Fails if no MMU error
 * is actually pending.
 */
1621 static int dsp_mmu_itack(void)
1623 unsigned long dspadr;
1625 printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1626 if (!dsp_err_isset(ERRCODE_MMU)) {
1627 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
/* Round the fault address down to a 4KB page boundary. */
1630 dspadr = dsp_fault_adr & ~(SZ_4K-1);
1631 dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM); /* FIXME: reserve TLB entry for this */
1632 printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1633 dsp_set_runlevel(RUNLEVEL_RECOVERY);
1636 dsp_exunmap(dspadr);
1637 dsp_err_clear(ERRCODE_MMU);
1640 #endif /* CONFIG_ARCH_OMAP1 */
1642 #ifdef CONFIG_ARCH_OMAP2
/* OMAP2: set of DSP MMU fault interrupts we want enabled/reported. */
1643 #define MMU_IRQ_MASK \
1644 (DSP_MMU_IRQ_MULTIHITFAULT | \
1645 DSP_MMU_IRQ_TABLEWALKFAULT | \
1646 DSP_MMU_IRQ_EMUMISS | \
1647 DSP_MMU_IRQ_TRANSLATIONFAULT | \
1648 DSP_MMU_IRQ_TLBMISS)
/*
 * Initialize the DSP MMU: under exmap_sem, reset the TLB, install the
 * preserved entries, and (OMAP2) program the IRQ mask.  On OMAP1 the DSP
 * clock and memory must be requested around the register accesses.
 */
1651 static void dsp_mmu_init(void)
1653 struct tlb_lock tlb_lock;
1655 #ifdef CONFIG_ARCH_OMAP1
1656 clk_enable(dsp_ck_handle);
1657 omap_dsp_request_mem();
/* Writer lock: TLB/exmap state must not change concurrently. */
1659 down_write(&exmap_sem);
1661 #if defined(CONFIG_ARCH_OMAP1)
1662 dsp_mmu_disable(); /* clear all */
1664 #elif defined(CONFIG_ARCH_OMAP2)
1669 /* DSP TLB initialization */
1671 tlb_lock.victim = 0;
1672 set_tlb_lock(&tlb_lock);
/* Remember how many preserved entries exist for later TLB management. */
1674 exmap_preserved_cnt = exmap_setup_preserved_entries();
1676 #ifdef CONFIG_ARCH_OMAP2
1677 /* MMU IRQ mask setup */
1678 dsp_mmu_write_reg(MMU_IRQ_MASK, DSP_MMU_IRQENABLE);
1681 up_write(&exmap_sem);
1682 #ifdef CONFIG_ARCH_OMAP1
1683 omap_dsp_release_mem();
1684 clk_disable(dsp_ck_handle);
/* Shut down the DSP MMU; clears the preserved mappings (body truncated). */
1688 static void dsp_mmu_shutdown(void)
1691 exmap_clear_preserved_entries();
1695 #ifdef CONFIG_ARCH_OMAP1
1697 * intmem_enable() / disable():
1698 * if the address is in DSP internal memories,
1699 * we send PM mailbox commands so that DSP DMA domain won't go in idle
1700 * when ARM is accessing to those memories.
/* Only meaningful once the DSP configuration is in the READY state. */
1702 static int intmem_enable(void)
1706 if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1707 ret = mbcompose_send(PM, PM_ENABLE, DSPREG_ICR_DMA);
/* Counterpart of intmem_enable(): allow the DSP DMA domain to idle again. */
1712 static void intmem_disable(void) {
1713 if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1714 mbcompose_send(PM, PM_DISABLE, DSPREG_ICR_DMA);
1716 #endif /* CONFIG_ARCH_OMAP1 */
1719 * dsp_mem_enable() / disable()
1721 #ifdef CONFIG_ARCH_OMAP1
/* Refcount for DSP internal-memory users (OMAP1); guards request/release. */
1722 int intmem_usecount;
/*
 * Enable access to DSP memory at 'adr'.  For internal memory the first
 * user requests the memory (OMAP1); otherwise takes exmap_sem for read
 * to protect external mappings while they are in use.
 */
1725 int dsp_mem_enable(void *adr)
1729 if (is_dsp_internal_mem(adr)) {
1730 #ifdef CONFIG_ARCH_OMAP1
1731 if (intmem_usecount++ == 0)
1732 ret = omap_dsp_request_mem();
1735 down_read(&exmap_sem);
/*
 * Counterpart of dsp_mem_enable(): last internal-memory user releases the
 * memory (OMAP1); external accesses drop the exmap_sem read lock.
 */
1740 void dsp_mem_disable(void *adr)
1742 if (is_dsp_internal_mem(adr)) {
1743 #ifdef CONFIG_ARCH_OMAP1
1744 if (--intmem_usecount == 0)
1745 omap_dsp_release_mem();
1748 up_read(&exmap_sem);
1752 #ifdef CONFIG_ARCH_OMAP1
/*
 * Recovery helper: if enable/disable calls got unbalanced, warn, force
 * the use count back to zero and release the memory once.
 */
1753 void dsp_mem_usecount_clear(void)
1755 if (intmem_usecount != 0) {
1757 "omapdsp: unbalanced memory request/release detected.\n"
1758 " intmem_usecount is not zero at where "
1759 "it should be! ... fixed to be zero.\n");
1760 intmem_usecount = 0;
1761 omap_dsp_release_mem();
1764 #endif /* CONFIG_ARCH_OMAP1 */
1767 * dsp_mem file operations
/*
 * llseek for the dsp_mem device: serialize on the inode mutex and
 * set/advance f_pos according to 'orig' (SEEK_SET/SEEK_CUR visible).
 */
1769 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1773 mutex_lock(&file->f_dentry->d_inode->i_mutex);
1776 file->f_pos = offset;
1780 file->f_pos += offset;
1786 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
/*
 * Read from DSP internal memory into a user buffer.  Clamps the count to
 * the end of internal memory; on OMAP1 the API clock must be running
 * while touching the memory.
 */
1790 static ssize_t intmem_read(struct file *file, char __user *buf, size_t count,
1793 unsigned long p = *ppos;
1794 void *vadr = dspbyte_to_virt(p);
1795 ssize_t size = dspmem_size;
1800 #ifdef CONFIG_ARCH_OMAP1
1801 clk_enable(api_ck_handle);
/* Do not read past the end of DSP internal memory. */
1804 if (count > size - p)
1806 if (copy_to_user(buf, vadr, read)) {
1812 #ifdef CONFIG_ARCH_OMAP1
1813 clk_disable(api_ck_handle);
/*
 * Read from externally-mapped DSP memory.  Rejects ranges not covered by
 * a valid exmap mapping, clamps to the DSP address space, then copies to
 * the user buffer.
 */
1818 static ssize_t exmem_read(struct file *file, char __user *buf, size_t count,
1821 unsigned long p = *ppos;
1822 void *vadr = dspbyte_to_virt(p);
1824 if (!exmap_valid(vadr, count)) {
1826 "omapdsp: DSP address %08lx / size %08x "
1827 "is not valid!\n", p, count);
1830 if (count > DSPSPACE_SIZE - p)
1831 count = DSPSPACE_SIZE - p;
1832 if (copy_to_user(buf, vadr, count))
/*
 * read() entry for the dsp_mem device: enable access to the target
 * region, then dispatch to intmem_read() or exmem_read() depending on
 * whether *ppos falls in DSP internal memory.
 */
1839 static ssize_t dsp_mem_read(struct file *file, char __user *buf, size_t count,
1843 void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1845 if (dsp_mem_enable(vadr) < 0)
1847 if (is_dspbyte_internal_mem(*ppos))
1848 ret = intmem_read(file, buf, count, ppos);
1850 ret = exmem_read(file, buf, count, ppos);
/* Balance the dsp_mem_enable() above. */
1851 dsp_mem_disable(vadr);
/*
 * Write from a user buffer into DSP internal memory.  Mirror of
 * intmem_read(): clamps the count and (OMAP1) keeps the API clock
 * enabled during the copy.
 */
1856 static ssize_t intmem_write(struct file *file, const char __user *buf,
1857 size_t count, loff_t *ppos)
1859 unsigned long p = *ppos;
1860 void *vadr = dspbyte_to_virt(p);
1861 ssize_t size = dspmem_size;
1866 #ifdef CONFIG_ARCH_OMAP1
1867 clk_enable(api_ck_handle);
1870 if (count > size - p)
1872 if (copy_from_user(vadr, buf, written)) {
1878 #ifdef CONFIG_ARCH_OMAP1
1879 clk_disable(api_ck_handle);
/*
 * Write from a user buffer into externally-mapped DSP memory.  Mirror of
 * exmem_read(): validates the mapping and clamps to the DSP space.
 */
1884 static ssize_t exmem_write(struct file *file, const char __user *buf,
1885 size_t count, loff_t *ppos)
1887 unsigned long p = *ppos;
1888 void *vadr = dspbyte_to_virt(p);
1890 if (!exmap_valid(vadr, count)) {
1892 "omapdsp: DSP address %08lx / size %08x "
1893 "is not valid!\n", p, count);
1896 if (count > DSPSPACE_SIZE - p)
1897 count = DSPSPACE_SIZE - p;
1898 if (copy_from_user(vadr, buf, count))
/*
 * write() entry for the dsp_mem device: same dispatch pattern as
 * dsp_mem_read(), but to intmem_write()/exmem_write().
 */
1905 static ssize_t dsp_mem_write(struct file *file, const char __user *buf,
1906 size_t count, loff_t *ppos)
1909 void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1911 if (dsp_mem_enable(vadr) < 0)
1913 if (is_dspbyte_internal_mem(*ppos))
1914 ret = intmem_write(file, buf, count, ppos);
1916 ret = exmem_write(file, buf, count, ppos);
1917 dsp_mem_disable(vadr);
/*
 * ioctl handler for the dsp_mem device.  Commands visible here:
 *   MMUINIT      - (re)initialize the DSP MMU
 *   EXMAP/EXUNMAP/EXMAP_FLUSH - manage external memory mappings
 *   FBEXPORT     - export the framebuffer; returns the DSP address
 *   MMUITACK     - (OMAP1) acknowledge an MMU fault
 *   KMEM_RESERVE/RELEASE - manage the kernel memory pools
 * Unknown commands fall through to -ENOIOCTLCMD.
 */
1922 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1923 unsigned int cmd, unsigned long arg)
1926 case MEM_IOCTL_MMUINIT:
1930 case MEM_IOCTL_EXMAP:
1932 struct omap_dsp_mapinfo mapinfo;
1933 if (copy_from_user(&mapinfo, (void __user *)arg,
1936 return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1940 case MEM_IOCTL_EXUNMAP:
/* For EXUNMAP, 'arg' is the DSP address itself, not a user pointer. */
1941 return dsp_exunmap((unsigned long)arg);
1943 case MEM_IOCTL_EXMAP_FLUSH:
1947 case MEM_IOCTL_FBEXPORT:
1951 if (copy_from_user(&dspadr, (void __user *)arg,
1952 sizeof(dsp_long_t)))
1954 ret = dsp_fbexport(&dspadr);
/* Copy the (possibly adjusted) DSP address back to userland. */
1955 if (copy_to_user((void __user *)arg, &dspadr,
1956 sizeof(dsp_long_t)))
1961 #ifdef CONFIG_ARCH_OMAP1
1962 case MEM_IOCTL_MMUITACK:
1963 return dsp_mmu_itack();
1966 case MEM_IOCTL_KMEM_RESERVE:
1969 if (copy_from_user(&size, (void __user *)arg,
1972 return dsp_kmem_reserve(size);
1975 case MEM_IOCTL_KMEM_RELEASE:
1980 return -ENOIOCTLCMD;
/* mmap entry for the dsp_mem device (body not visible in this listing). */
1984 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
/* open entry: raw DSP memory access requires CAP_SYS_RAWIO. */
1992 static int dsp_mem_open(struct inode *inode, struct file *file)
1994 if (!capable(CAP_SYS_RAWIO))
2000 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
2002 * fb update functions:
2003 * fbupd_response() is executed by the workqueue.
2004 * fbupd_cb() is called when fb update is done, in interrupt context.
2005 * mbox_fbupd() is called when KFUNC:FBCTL:UPD is received from DSP.
/* Workqueue body: tell the DSP the framebuffer update has completed. */
2007 static void fbupd_response(void *arg)
2011 status = mbcompose_send(KFUNC, KFUNC_FBCTL, FBCTL_UPD);
2013 /* FIXME: DSP is busy !! */
2015 "omapdsp: DSP is busy when trying to send FBCTL:UPD "
/* Pre-declared work item so fbupd_cb() can schedule from IRQ context. */
2020 static DECLARE_WORK(fbupd_response_work, (void (*)(void *))fbupd_response,
/* Runs in interrupt context: defer the mailbox send to process context. */
2023 static void fbupd_cb(void *arg)
2025 schedule_work(&fbupd_response_work);
/*
 * Handle KFUNC:FBCTL:UPD from the DSP: read the update-window parameters
 * out of the system IPBUF, release it, then kick an asynchronous
 * framebuffer update; fbupd_cb() fires when the update is done.
 */
2028 void mbox_fbctl_upd(void)
2030 struct omapfb_update_window win;
2031 volatile unsigned short *buf = ipbuf_sys_da->d;
2033 /* FIXME: try count sometimes exceeds 1000. */
2034 if (sync_with_dsp(&ipbuf_sys_da->s, TID_ANON, 5000) < 0) {
2035 printk(KERN_ERR "mbox: FBCTL:UPD - IPBUF sync failed!\n");
2041 win.height = buf[3];
2042 win.format = buf[4];
/* Done with the shared IPBUF; release before the potentially-slow update. */
2043 release_ipbuf_pvt(ipbuf_sys_da);
2045 if (!omapfb_ready) {
2047 "omapdsp: fbupd() called while HWA742 is not ready!\n");
2050 //printk("calling omapfb_update_window_async()\n");
2051 omapfb_update_window_async(registered_fb[1], &win, fbupd_cb, NULL);
2054 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
/* Stub when no external LCD controller is configured. */
2056 void mbox_fbctl_upd(void)
2059 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
/*
 * sysfs 'mmu' attribute: dump every DSP MMU TLB entry in human-readable
 * form.  Reads each line by pointing the TLB victim at it, decodes the
 * CAM/RAM register pair per architecture, and restores the original
 * victim afterwards.  Holds exmap_sem (read) throughout.
 */
2066 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
2070 struct tlb_lock tlb_lock_org;
2073 #ifdef CONFIG_ARCH_OMAP1
2074 clk_enable(dsp_ck_handle);
2075 omap_dsp_request_mem();
2077 down_read(&exmap_sem);
/* Save the current lock/victim so it can be restored at the end. */
2079 get_tlb_lock(&tlb_lock_org);
2081 #if defined(CONFIG_ARCH_OMAP1)
2082 len = sprintf(buf, "P: preserved, V: valid\n"
2083 "ety P V size cam_va ram_pa ap\n");
2084 /* 00: P V 4KB 0x300000 0x10171800 FA */
2085 #elif defined(CONFIG_ARCH_OMAP2)
2086 len = sprintf(buf, "P: preserved, V: valid\n"
2087 "B: big endian, L:little endian, "
2088 "M: mixed page attribute\n"
2089 "ety P V size cam_va ram_pa E ES M\n");
2090 /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
2093 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2094 struct cam_ram_regset cr;
2095 struct tlb_lock tlb_lock_tmp;
2096 struct tlb_entry ent;
2097 #if defined(CONFIG_ARCH_OMAP1)
2098 char *pgsz_str, *ap_str;
2099 #elif defined(CONFIG_ARCH_OMAP2)
2100 char *pgsz_str, *elsz_str;
2103 /* read a TLB entry */
2104 tlb_lock_tmp.base = tlb_lock_org.base;
2105 tlb_lock_tmp.victim = i;
2106 __read_tlb(&tlb_lock_tmp, &cr);
2108 #if defined(CONFIG_ARCH_OMAP1)
/* OMAP1: VA/PA are split across high/low CAM/RAM halves. */
2109 ent.pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
2110 ent.prsvd = cr.cam_l & DSP_MMU_CAM_P;
2111 ent.valid = cr.cam_l & DSP_MMU_CAM_V;
2112 ent.ap = cr.ram_l & DSP_MMU_RAM_L_AP_MASK;
2113 ent.va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
2114 (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
2115 ent.pa = (unsigned long)cr.ram_h << 16 |
2116 (cr.ram_l & DSP_MMU_RAM_L_RAM_LSB_MASK);
2118 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
2119 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2120 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
2121 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1KB) ? " 1KB":
2123 ap_str = (ent.ap == DSP_MMU_RAM_L_AP_RO) ? "RO":
2124 (ent.ap == DSP_MMU_RAM_L_AP_FA) ? "FA":
2125 (ent.ap == DSP_MMU_RAM_L_AP_NA) ? "NA":
2127 #elif defined(CONFIG_ARCH_OMAP2)
/* OMAP2: single 32-bit CAM/RAM registers carry all the fields. */
2128 ent.pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
2129 ent.prsvd = cr.cam & DSP_MMU_CAM_P;
2130 ent.valid = cr.cam & DSP_MMU_CAM_V;
2131 ent.va = cr.cam & DSP_MMU_CAM_VATAG_MASK;
2132 ent.endian = cr.ram & DSP_MMU_RAM_ENDIANNESS;
2133 ent.elsz = cr.ram & DSP_MMU_RAM_ELEMENTSIZE_MASK;
2134 ent.pa = cr.ram & DSP_MMU_RAM_PADDR_MASK;
2135 ent.mixed = cr.ram & DSP_MMU_RAM_MIXED;
/* NOTE(review): "64MB" label for PAGESIZE_16MB looks like a typo — verify. */
2137 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? "64MB":
2138 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
2139 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2140 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
2142 elsz_str = (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_8) ? " 8":
2143 (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_16) ? "16":
2144 (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_32) ? "32":
2148 if (i == tlb_lock_org.base)
2149 len += sprintf(buf + len, "lock base = %d\n",
2151 if (i == tlb_lock_org.victim)
2152 len += sprintf(buf + len, "victim = %d\n",
2153 tlb_lock_org.victim);
2154 #if defined(CONFIG_ARCH_OMAP1)
2155 len += sprintf(buf + len,
2156 /* 00: P V 4KB 0x300000 0x10171800 FA */
2157 "%02d: %c %c %s 0x%06x 0x%08lx %s\n",
2159 ent.prsvd ? 'P' : ' ',
2160 ent.valid ? 'V' : ' ',
2161 pgsz_str, ent.va, ent.pa, ap_str);
2162 #elif defined(CONFIG_ARCH_OMAP2)
2163 len += sprintf(buf + len,
2164 /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
2165 "%02d: %c %c %s 0x%06x 0x%08lx %c %s %c\n",
2167 ent.prsvd ? 'P' : ' ',
2168 ent.valid ? 'V' : ' ',
2169 pgsz_str, ent.va, ent.pa,
2170 ent.endian ? 'B' : 'L',
2172 ent.mixed ? 'M' : ' ');
2173 #endif /* CONFIG_ARCH_OMAP2 */
2176 /* restore victim entry */
2177 set_tlb_lock(&tlb_lock_org);
2179 up_read(&exmap_sem);
2180 #ifdef CONFIG_ARCH_OMAP1
2181 omap_dsp_release_mem();
2182 clk_disable(dsp_ck_handle);
/*
 * sysfs 'exmap' attribute: list external mappings.  Entries form linked
 * chains in exmap_tbl (link.prev/link.next); only chain heads
 * (link.prev < 0) start a report line, then each chain is walked twice —
 * once to total the size, once to print the per-buffer rows.
 */
2188 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
2194 down_read(&exmap_sem);
2195 len = sprintf(buf, " dspadr size buf size uc\n");
2196 /* 0x300000 0x123000 0xc0171000 0x100000 0*/
2197 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2198 struct exmap_tbl_entry *ent = &exmap_tbl[i];
2201 enum exmap_type_e type;
2204 /* find a top of link */
2205 if (!ent->valid || (ent->link.prev >= 0))
/* First pass: accumulate total mapped size over the chain. */
2213 ent = &exmap_tbl[idx];
2214 size += PAGE_SIZE << ent->order;
2215 } while ((idx = ent->link.next) >= 0);
2217 len += sprintf(buf + len, "0x%06x %#8lx",
2218 virt_to_dspbyte(vadr), size);
2220 if (type == EXMAP_TYPE_FB) {
2221 len += sprintf(buf + len, " framebuf\n");
2223 len += sprintf(buf + len, "\n");
/* Second pass: one row per buffer in the chain. */
2226 ent = &exmap_tbl[idx];
2227 len += sprintf(buf + len,
2228 /* 0xc0171000 0x100000 0*/
2229 "%19s0x%8p %#8lx %2d\n",
2231 PAGE_SIZE << ent->order,
2233 } while ((idx = ent->link.next) >= 0);
2237 up_read(&exmap_sem);
/*
 * sysfs 'mempool' attribute: report the reserved kernel memory pools
 * (1MB and 64KB mempools) — total reserved bytes plus per-pool counts.
 */
2242 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
2245 int min_nr_1M = 0, curr_nr_1M = 0;
2246 int min_nr_64K = 0, curr_nr_64K = 0;
/* Pools may not exist yet; report zeros in that case. */
2249 if (likely(kmem_pool_1M)) {
2250 min_nr_1M = kmem_pool_1M->min_nr;
2251 curr_nr_1M = kmem_pool_1M->curr_nr;
2252 total += min_nr_1M * SZ_1MB;
2254 if (likely(kmem_pool_64K)) {
2255 min_nr_64K = kmem_pool_64K->min_nr;
2256 curr_nr_64K = kmem_pool_64K->curr_nr;
2257 total += min_nr_64K * SZ_64KB;
2262 "1M buffer: %d (%d free)\n"
2263 "64K buffer: %d (%d free)\n",
2264 total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
2268 * workqueue for mmu int
2270 #ifdef CONFIG_ARCH_OMAP1
2273 * We ignore prefetch err.
2275 #define MMUFAULT_MASK \
2276 (DSP_MMU_FAULT_ST_PERM |\
2277 DSP_MMU_FAULT_ST_TLB_MISS |\
2278 DSP_MMU_FAULT_ST_TRANS)
2279 #endif /* CONFIG_ARCH_OMAP1 */
/*
 * Workqueue body of the DSP MMU interrupt: read the fault status and
 * address registers, log which fault(s) occurred (masked faults are
 * printed in parentheses), record the error, and on OMAP1 reset the DSP.
 * Re-enables INT_DSP_MMU (disabled by the hard-IRQ handler) on exit.
 */
2281 static void do_mmu_int(void)
2283 #if defined(CONFIG_ARCH_OMAP1)
2285 dsp_mmu_reg_t status;
2286 dsp_mmu_reg_t adh, adl;
2289 status = dsp_mmu_read_reg(DSP_MMU_FAULT_ST);
2290 adh = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_H);
2291 adl = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_L);
2292 dp = adh & DSP_MMU_FAULT_AD_H_DP;
/* Combine the high/low halves into the full fault address. */
2293 dsp_fault_adr = MK32(adh & DSP_MMU_FAULT_AD_H_ADR_MASK, adl);
2295 /* if the fault is masked, nothing to do */
2296 if ((status & MMUFAULT_MASK) == 0) {
2297 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
2299 * note: in OMAP1710,
2300 * when CACHE + DMA domain gets out of idle in DSP,
2301 * MMU interrupt occurs but DSP_MMU_FAULT_ST is not set.
2302 * in this case, we just ignore the interrupt.
2305 printk(KERN_DEBUG "%s%s%s%s\n",
2306 (status & DSP_MMU_FAULT_ST_PREF)?
2307 " (prefetch err)" : "",
2308 (status & DSP_MMU_FAULT_ST_PERM)?
2309 " (permission fault)" : "",
2310 (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2312 (status & DSP_MMU_FAULT_ST_TRANS) ?
2313 " (translation fault)": "");
2314 printk(KERN_DEBUG "fault address = %#08x\n",
2317 enable_irq(INT_DSP_MMU);
2321 #elif defined(CONFIG_ARCH_OMAP2)
2323 dsp_mmu_reg_t status;
2325 status = dsp_mmu_read_reg(DSP_MMU_IRQSTATUS);
2326 dsp_fault_adr = dsp_mmu_read_reg(DSP_MMU_FAULT_AD);
2328 #endif /* CONFIG_ARCH_OMAP2 */
2330 printk(KERN_INFO "DSP MMU interrupt!\n");
2332 #if defined(CONFIG_ARCH_OMAP1)
/* Faults in the mask print bare; masked-out ones in parentheses. */
2334 printk(KERN_INFO "%s%s%s%s\n",
2335 (status & DSP_MMU_FAULT_ST_PREF)?
2336 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PREF)?
2340 (status & DSP_MMU_FAULT_ST_PERM)?
2341 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PERM)?
2342 " permission fault":
2343 " (permission fault)":
2345 (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2346 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TLB_MISS)?
2350 (status & DSP_MMU_FAULT_ST_TRANS)?
2351 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TRANS)?
2352 " translation fault":
2353 " (translation fault)":
2356 #elif defined(CONFIG_ARCH_OMAP2)
2358 printk(KERN_INFO "%s%s%s%s%s\n",
2359 (status & DSP_MMU_IRQ_MULTIHITFAULT)?
2360 (MMU_IRQ_MASK & DSP_MMU_IRQ_MULTIHITFAULT)?
2364 (status & DSP_MMU_IRQ_TABLEWALKFAULT)?
2365 (MMU_IRQ_MASK & DSP_MMU_IRQ_TABLEWALKFAULT)?
2366 " table walk fault":
2367 " (table walk fault)":
2369 (status & DSP_MMU_IRQ_EMUMISS)?
2370 (MMU_IRQ_MASK & DSP_MMU_IRQ_EMUMISS)?
2374 (status & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2375 (MMU_IRQ_MASK & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2376 " translation fault":
2377 " (translation fault)":
2379 (status & DSP_MMU_IRQ_TLBMISS)?
2380 (MMU_IRQ_MASK & DSP_MMU_IRQ_TLBMISS)?
2385 #endif /* CONFIG_ARCH_OMAP2 */
2387 printk(KERN_INFO "fault address = %#08x\n", dsp_fault_adr);
2389 if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
2390 dsp_err_set(ERRCODE_MMU, (unsigned long)dsp_fault_adr);
2392 #ifdef CONFIG_ARCH_OMAP1
2395 printk(KERN_INFO "Resetting DSP...\n");
2396 dsp_cpustat_request(CPUSTAT_RESET);
2398 * if we enable followings, semaphore lock should be avoided.
2400 printk(KERN_INFO "Flushing DSP MMU...\n");
2406 #ifdef CONFIG_ARCH_OMAP2
/* Write-to-clear: acknowledge the handled IRQ bits. */
2408 dsp_mmu_write_reg(status, DSP_MMU_IRQSTATUS);
2412 enable_irq(INT_DSP_MMU);
/* Work item that runs do_mmu_int() in process context. */
2415 static DECLARE_WORK(mmu_int_work, (void (*)(void *))do_mmu_int, NULL);
2418 * DSP MMU interrupt handler
/*
 * Hard-IRQ handler: mask further MMU interrupts and defer all register
 * access/logging to the workqueue (do_mmu_int re-enables the IRQ).
 */
2421 static irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id,
2422 struct pt_regs *regs)
2424 disable_irq(INT_DSP_MMU);
2425 schedule_work(&mmu_int_work);
/* File operations for the dsp_mem character device. */
2432 struct file_operations dsp_mem_fops = {
2433 .owner = THIS_MODULE,
2434 .llseek = dsp_mem_lseek,
2435 .read = dsp_mem_read,
2436 .write = dsp_mem_write,
2437 .ioctl = dsp_mem_ioctl,
2438 .mmap = dsp_mem_mmap,
2439 .open = dsp_mem_open,
/* Start-of-operation hook: register the internal-memory callbacks (OMAP1). */
2442 void dsp_mem_start(void)
2444 #ifdef CONFIG_ARCH_OMAP1
2445 dsp_register_mem_cb(intmem_enable, intmem_disable);
/* Stop hook: clear the mem-sync state and unregister callbacks (OMAP1). */
2449 void dsp_mem_stop(void)
2451 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
2452 #ifdef CONFIG_ARCH_OMAP1
2453 dsp_unregister_mem_cb();
/* Token address used as dev_id for the MMU IRQ (matched in free_irq). */
2457 static char devid_mmu;
/*
 * Module init for the memory subsystem: program the OMAP2 IPI registers,
 * clear the exmap table, allocate the DSP vector page, hook the MMU
 * interrupt (left disabled until the DSP runs), and create the sysfs
 * attributes.  Error path (bottom) undoes the vector-page allocation.
 */
2459 int __init dsp_mem_init(void)
2463 #ifdef CONFIG_ARCH_OMAP2
2464 int dspmem_pg_count;
/* One IPI entry per 4KB page of DSP memory, element size 16 bits. */
2466 dspmem_pg_count = dspmem_size >> 12;
2467 for (i = 0; i < dspmem_pg_count; i++) {
2468 dsp_ipi_write_reg(i, DSP_IPI_INDEX);
2469 dsp_ipi_write_reg(DSP_IPI_ENTRY_ELMSIZEVALUE_16, DSP_IPI_ENTRY);
2471 dsp_ipi_write_reg(1, DSP_IPI_ENABLE);
2473 dsp_ipi_write_reg(IOMAP_VAL, DSP_IPI_IOMAP);
2476 for (i = 0; i < DSP_MMU_TLB_LINES; i++)
2477 exmap_tbl[i].valid = 0;
/* DMA-capable page for the DSP vector table. */
2479 dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
2480 if (dspvect_page == NULL) {
2482 "omapdsp: failed to allocate memory "
2483 "for dsp vector table\n");
2487 #ifdef CONFIG_ARCH_OMAP1
2488 dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);
2492 * DSP MMU interrupt setup
2494 ret = request_irq(INT_DSP_MMU, dsp_mmu_interrupt, SA_INTERRUPT, "dsp",
2498 "failed to register DSP MMU interrupt: %d\n", ret);
2502 /* MMU interrupt is not enabled until DSP runs */
2503 disable_irq(INT_DSP_MMU);
/* NOTE(review): device_create_file return values are ignored here. */
2505 device_create_file(&dsp_device.dev, &dev_attr_mmu);
2506 device_create_file(&dsp_device.dev, &dev_attr_exmap);
2507 device_create_file(&dsp_device.dev, &dev_attr_mempool);
/* Error path: undo idle-boot base (OMAP1) and free the vector page. */
2512 #ifdef CONFIG_ARCH_OMAP1
2513 dsp_reset_idle_boot_base();
2516 free_page((unsigned long)dspvect_page);
2517 dspvect_page = NULL;
/*
 * Module exit: release the MMU IRQ (re-enabling it first to restore the
 * disable_depth from init), undo the idle-boot base (OMAP1), free the
 * vector page, and remove the sysfs attributes.
 */
2521 void dsp_mem_exit(void)
2523 free_irq(INT_DSP_MMU, &devid_mmu);
2525 /* recover disable_depth */
2526 enable_irq(INT_DSP_MMU);
2528 #ifdef CONFIG_ARCH_OMAP1
2529 dsp_reset_idle_boot_base();
2534 if (dspvect_page != NULL) {
2535 free_page((unsigned long)dspvect_page);
2536 dspvect_page = NULL;
2539 device_remove_file(&dsp_device.dev, &dev_attr_mmu);
2540 device_remove_file(&dsp_device.dev, &dev_attr_exmap);
2541 device_remove_file(&dsp_device.dev, &dev_attr_mempool);