/*
 * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
 *
 * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
 *
 * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *
 * Conversion to mempool API and ARM MMU section mapping
 * by Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
27 #include <linux/module.h>
28 #include <linux/init.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/mempool.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <asm/uaccess.h>
39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h>
41 #include <asm/arch/tc.h>
42 #include <asm/arch/omapfb.h>
43 #include <asm/arch/mailbox.h>
44 #include <asm/arch/dsp_common.h>
45 #include "uaccess_dsp.h"
46 #include "dsp_mbcmd.h"
51 #ifdef CONFIG_ARCH_OMAP2
52 #define IOMAP_VAL 0x3f
57 #define SZ_64KB 0x10000
58 #define SZ_1MB 0x100000
59 #define SZ_16MB 0x1000000
60 #define is_aligned(adr,align) (!((adr)&((align)-1)))
61 #define ORDER_4KB (12 - PAGE_SHIFT)
62 #define ORDER_64KB (16 - PAGE_SHIFT)
63 #define ORDER_1MB (20 - PAGE_SHIFT)
66 * absorb DSP MMU register size and location difference
68 #if defined(CONFIG_ARCH_OMAP1)
69 typedef u16 dsp_mmu_reg_t;
70 #define dsp_mmu_read_reg(a) omap_readw(a)
71 #define dsp_mmu_write_reg(v,a) omap_writew(v,a)
72 #elif defined(CONFIG_ARCH_OMAP2)
73 typedef u32 dsp_mmu_reg_t;
74 #define dsp_mmu_read_reg(a) readl(a)
75 #define dsp_mmu_write_reg(v,a) writel(v,a)
76 #define dsp_ipi_read_reg(a) readl(a)
77 #define dsp_ipi_write_reg(v,a) writel(v,a)
80 #if defined(CONFIG_ARCH_OMAP1)
82 #define dsp_mmu_enable() \
84 dsp_mmu_write_reg(DSP_MMU_CNTL_MMU_EN | DSP_MMU_CNTL_RESET_SW, \
87 #define dsp_mmu_disable() \
89 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
91 #define __dsp_mmu_itack() \
93 dsp_mmu_write_reg(DSP_MMU_IT_ACK_IT_ACK, DSP_MMU_IT_ACK); \
96 #elif defined(CONFIG_ARCH_OMAP2)
98 #define dsp_mmu_enable() \
100 dsp_mmu_write_reg(DSP_MMU_CNTL_MMUENABLE, DSP_MMU_CNTL); \
102 #define dsp_mmu_disable() \
104 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
106 #define dsp_mmu_reset() \
108 dsp_mmu_write_reg(dsp_mmu_read_reg(DSP_MMU_SYSCONFIG) | \
109 DSP_MMU_SYSCONFIG_SOFTRESET, \
110 DSP_MMU_SYSCONFIG); \
113 #endif /* CONFIG_ARCH_OMAP2 */
115 #define dsp_mmu_flush() \
117 dsp_mmu_write_reg(DSP_MMU_FLUSH_ENTRY_FLUSH_ENTRY, \
118 DSP_MMU_FLUSH_ENTRY); \
120 #define __dsp_mmu_gflush() \
122 dsp_mmu_write_reg(DSP_MMU_GFLUSH_GFLUSH, DSP_MMU_GFLUSH); \
126 * absorb register name difference
128 #ifdef CONFIG_ARCH_OMAP1
129 #define DSP_MMU_CAM_P DSP_MMU_CAM_L_P
130 #define DSP_MMU_CAM_V DSP_MMU_CAM_L_V
131 #define DSP_MMU_CAM_PAGESIZE_MASK DSP_MMU_CAM_L_PAGESIZE_MASK
132 #define DSP_MMU_CAM_PAGESIZE_1MB DSP_MMU_CAM_L_PAGESIZE_1MB
133 #define DSP_MMU_CAM_PAGESIZE_64KB DSP_MMU_CAM_L_PAGESIZE_64KB
134 #define DSP_MMU_CAM_PAGESIZE_4KB DSP_MMU_CAM_L_PAGESIZE_4KB
135 #define DSP_MMU_CAM_PAGESIZE_1KB DSP_MMU_CAM_L_PAGESIZE_1KB
136 #endif /* CONFIG_ARCH_OMAP1 */
141 #ifdef CONFIG_ARCH_OMAP1
142 #define EMIF_PRIO_LB_MASK 0x0000f000
143 #define EMIF_PRIO_LB_SHIFT 12
144 #define EMIF_PRIO_DMA_MASK 0x00000f00
145 #define EMIF_PRIO_DMA_SHIFT 8
146 #define EMIF_PRIO_DSP_MASK 0x00000070
147 #define EMIF_PRIO_DSP_SHIFT 4
148 #define EMIF_PRIO_MPU_MASK 0x00000007
149 #define EMIF_PRIO_MPU_SHIFT 0
150 #define set_emiff_dma_prio(prio) \
152 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
153 ~EMIF_PRIO_DMA_MASK) | \
154 ((prio) << EMIF_PRIO_DMA_SHIFT), \
155 OMAP_TC_OCPT1_PRIOR); \
157 #endif /* CONFIG_ARCH_OMAP1 */
164 struct exmap_tbl_entry {
165 unsigned int valid:1;
166 unsigned int prsvd:1; /* preserved */
167 int usecount; /* reference count by mmap */
168 enum exmap_type_e type;
169 void *buf; /* virtual address of the buffer,
170 * i.e. 0xc0000000 - */
171 void *vadr; /* DSP shadow space,
172 * i.e. 0xe0000000 - 0xe0ffffff */
177 } link; /* grouping */
180 #define INIT_EXMAP_TBL_ENTRY(ent,b,v,typ,od) \
186 (ent)->usecount = 0; \
187 (ent)->type = (typ); \
188 (ent)->order = (od); \
189 (ent)->link.next = -1; \
190 (ent)->link.prev = -1; \
193 #define INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(ent,b,v) \
199 (ent)->usecount = 0; \
200 (ent)->type = EXMAP_TYPE_MEM; \
202 (ent)->link.next = -1; \
203 (ent)->link.prev = -1; \
206 #define DSP_MMU_TLB_LINES 32
207 static struct exmap_tbl_entry exmap_tbl[DSP_MMU_TLB_LINES];
208 static int exmap_preserved_cnt;
209 static DECLARE_RWSEM(exmap_sem);
211 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
212 static struct omapfb_notifier_block *omapfb_nb;
213 static int omapfb_ready;
216 struct cam_ram_regset {
217 #if defined(CONFIG_ARCH_OMAP1)
222 #elif defined(CONFIG_ARCH_OMAP2)
231 dsp_mmu_reg_t pgsz, prsvd, valid;
232 #if defined(CONFIG_ARCH_OMAP1)
234 #elif defined(CONFIG_ARCH_OMAP2)
235 dsp_mmu_reg_t endian, elsz, mixed;
239 #if defined(CONFIG_ARCH_OMAP1)
240 #define INIT_TLB_ENTRY(ent,v,p,ps) \
244 (ent)->pgsz = (ps); \
246 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
248 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
252 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
253 (ent)->prsvd = DSP_MMU_CAM_P; \
254 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
256 #elif defined(CONFIG_ARCH_OMAP2)
257 #define INIT_TLB_ENTRY(ent,v,p,ps) \
261 (ent)->pgsz = (ps); \
263 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
264 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
267 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
271 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
272 (ent)->prsvd = DSP_MMU_CAM_P; \
273 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
274 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
277 #define INIT_TLB_ENTRY_4KB_ES32_PRESERVED(ent,v,p) \
281 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
282 (ent)->prsvd = DSP_MMU_CAM_P; \
283 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
284 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_32; \
289 #if defined(CONFIG_ARCH_OMAP1)
290 #define cam_ram_valid(cr) ((cr).cam_l & DSP_MMU_CAM_V)
291 #elif defined(CONFIG_ARCH_OMAP2)
292 #define cam_ram_valid(cr) ((cr).cam & DSP_MMU_CAM_V)
300 static int dsp_exunmap(dsp_long_t dspadr);
302 static void *dspvect_page;
303 static u32 dsp_fault_adr;
304 static struct mem_sync_struct mem_sync;
306 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
308 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
310 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
313 static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
314 static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
315 static struct device_attribute dev_attr_mempool = __ATTR_RO(mempool);
318 * special mempool function:
319 * hope this goes to mm/mempool.c
321 static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
325 spin_lock_irqsave(&pool->lock, flags);
326 if (likely(pool->curr_nr)) {
327 void *element = pool->elements[--pool->curr_nr];
328 spin_unlock_irqrestore(&pool->lock, flags);
331 spin_unlock_irqrestore(&pool->lock, flags);
333 return mempool_alloc(pool, gfp_mask);
/*
 * lineup_offset():
 * align "adr" so that its low bits (selected by "mask") match those of
 * "ref"; if that would move the address backward, round up by one
 * alignment unit so the result is never below "adr".
 */
static __inline__ unsigned long lineup_offset(unsigned long adr,
					      unsigned long ref,
					      unsigned long mask)
{
	unsigned long newadr;

	newadr = (adr & ~mask) | (ref & mask);
	if (newadr < adr)
		newadr += mask + 1;
	return newadr;
}
348 int dsp_mem_sync_inc(void)
350 if (dsp_mem_enable((void *)dspmem_base) < 0)
353 mem_sync.DARAM->ad_arm++;
355 mem_sync.SARAM->ad_arm++;
357 mem_sync.SDRAM->ad_arm++;
358 dsp_mem_disable((void *)dspmem_base);
363 * dsp_mem_sync_config() is called from mbox1 workqueue
365 int dsp_mem_sync_config(struct mem_sync_struct *sync)
367 size_t sync_seq_sz = sizeof(struct sync_seq);
369 #ifdef OLD_BINARY_SUPPORT
371 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
375 if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
376 (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
377 (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
379 "omapdsp: mem_sync address validation failure!\n"
380 " mem_sync.DARAM = 0x%p,\n"
381 " mem_sync.SARAM = 0x%p,\n"
382 " mem_sync.SDRAM = 0x%p,\n",
383 sync->DARAM, sync->SARAM, sync->SDRAM);
386 memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
390 static mempool_t *kmem_pool_1M;
391 static mempool_t *kmem_pool_64K;
393 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
395 return (void *)__get_dma_pages(gfp, (unsigned int)order);
/* mempool element destructor: release pages taken by dsp_pool_alloc() */
static void dsp_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}
403 static void dsp_kmem_release(void)
406 mempool_destroy(kmem_pool_64K);
407 kmem_pool_64K = NULL;
411 mempool_destroy(kmem_pool_1M);
416 static int dsp_kmem_reserve(unsigned long size)
418 unsigned long len = size;
420 /* alignment check */
421 if (!is_aligned(size, SZ_64KB)) {
423 "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
427 if (size > DSPSPACE_SIZE) {
429 "omapdsp: size(0x%lx) is larger than DSP memory space "
430 "size (0x%x.\n", size, DSPSPACE_SIZE);
434 if (size >= SZ_1MB) {
437 if (likely(!kmem_pool_1M))
438 kmem_pool_1M = mempool_create(nr,
443 mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
446 size &= ~(0xf << 20);
449 if (size >= SZ_64KB) {
452 if (likely(!kmem_pool_64K))
453 kmem_pool_64K = mempool_create(nr,
458 mempool_resize(kmem_pool_64K,
459 kmem_pool_64K->min_nr + nr, GFP_KERNEL);
461 size &= ~(0xf << 16);
470 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
472 struct page *page, *ps, *pe;
474 ps = virt_to_page(buf);
475 pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
477 for (page = ps; page < pe; page++)
478 ClearPageReserved(page);
480 if ((order == ORDER_64KB) && likely(kmem_pool_64K))
481 mempool_free((void *)buf, kmem_pool_64K);
482 else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
483 mempool_free((void *)buf, kmem_pool_1M);
485 free_pages(buf, order);
489 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
496 pgd = pgd_offset_k(virt);
497 pud = pud_offset(pgd, virt);
498 pmd = pmd_offset(pud, virt);
500 if (pmd_none(*pmd)) {
501 pte = pte_alloc_one_kernel(&init_mm, 0);
505 /* note: two PMDs will be set */
506 pmd_populate_kernel(&init_mm, pmd, pte);
509 pte = pte_offset_kernel(pmd, virt);
510 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
515 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
521 pgd = pgd_offset_k(virt);
522 pud = pud_alloc(&init_mm, pgd, virt);
523 pmd = pmd_alloc(&init_mm, pud, virt);
525 if (virt & (1 << 20))
529 /* No good, fall back on smaller mappings. */
532 *pmd = __pmd(phys | prot);
533 flush_pmd_entry(pmd);
542 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
550 "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
553 prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
554 L_PTE_DIRTY | L_PTE_WRITE);
556 prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
557 PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
559 if (cpu_architecture() <= CPU_ARCH_ARMv5)
560 prot_sect |= PMD_BIT4;
564 while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
565 exmap_alloc_pte(virt, virt + off, prot_pte);
571 /* XXX: Not yet.. confuses dspfb -- PFM. */
573 while (size >= (PGDIR_SIZE / 2)) {
574 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
577 virt += (PGDIR_SIZE / 2);
578 size -= (PGDIR_SIZE / 2);
582 while (size >= PAGE_SIZE) {
583 exmap_alloc_pte(virt, virt + off, prot_pte);
595 * A process can have old mappings. if we want to clear a pmd,
596 * we need to do it for all proceeses that use the old mapping.
600 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
604 pte = pte_offset_map(pmd, addr);
609 pte_clear(&init_mm, addr, pte);
610 } while (pte++, addr += PAGE_SIZE, addr != end);
616 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
621 pmd = pmd_offset(pud, addr);
623 next = pmd_addr_end(addr, end);
625 if (addr & (1 << 20))
628 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
630 clean_pmd_entry(pmd);
634 if (pmd_none_or_clear_bad(pmd))
637 exmap_clear_pte_range(pmd, addr, next);
638 } while (pmd++, addr = next, addr != end);
642 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
647 pud = pud_offset(pgd, addr);
649 next = pud_addr_end(addr, end);
650 if (pud_none_or_clear_bad(pud))
653 exmap_clear_pmd_range(pud, addr, next);
654 } while (pud++, addr = next, addr != end);
658 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
661 unsigned long next, end;
665 "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
668 pgd = pgd_offset_k(virt);
671 next = pgd_addr_end(virt, end);
672 if (pgd_none_or_clear_bad(pgd))
675 exmap_clear_pud_range(pgd, virt, next);
676 } while (pgd++, virt = next, virt != end);
684 "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
687 while (size >= PAGE_SIZE) {
688 pgd = pgd_offset_k(virt);
689 pud = pud_offset(pgd, virt);
690 pmd = pmd_offset(pud, virt);
691 pte = pte_offset_kernel(pmd, virt);
693 pte_clear(&init_mm, virt, pte);
702 static int exmap_valid(void *vadr, size_t len)
704 /* exmap_sem should be held before calling this function */
708 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
710 unsigned long mapsize;
711 struct exmap_tbl_entry *ent = &exmap_tbl[i];
715 mapadr = (void *)ent->vadr;
716 mapsize = 1 << (ent->order + PAGE_SHIFT);
717 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
718 if (vadr + len <= mapadr + mapsize) {
719 /* this map covers whole address. */
723 * this map covers partially.
724 * check rest portion.
726 len -= mapadr + mapsize - vadr;
727 vadr = mapadr + mapsize;
736 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
738 void *ds = (void *)daram_base;
739 void *de = (void *)daram_base + daram_size;
740 void *ss = (void *)saram_base;
741 void *se = (void *)saram_base + saram_size;
744 if ((vadr >= ds) && (vadr < de)) {
746 return MEM_TYPE_CROSSING;
748 return MEM_TYPE_DARAM;
749 } else if ((vadr >= ss) && (vadr < se)) {
751 return MEM_TYPE_CROSSING;
753 return MEM_TYPE_SARAM;
755 down_read(&exmap_sem);
756 if (exmap_valid(vadr, len))
757 ret = MEM_TYPE_EXTERN;
765 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
767 if (dsp_mem_type(p, len) <= 0) {
773 vsprintf(s, fmt, args);
776 "omapdsp: %s address(0x%p) and size(0x%x) is "
778 " (crossing different type of memories, or \n"
779 " external memory space where no "
780 "actual memory is mapped)\n",
790 * exmap_use(), unuse():
791 * when the mapped area is exported to user space with mmap,
792 * the usecount is incremented.
793 * while the usecount > 0, that area can't be released.
795 void exmap_use(void *vadr, size_t len)
799 down_write(&exmap_sem);
800 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
802 unsigned long mapsize;
803 struct exmap_tbl_entry *ent = &exmap_tbl[i];
807 mapadr = (void *)ent->vadr;
808 mapsize = 1 << (ent->order + PAGE_SHIFT);
809 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
812 up_write(&exmap_sem);
815 void exmap_unuse(void *vadr, size_t len)
819 down_write(&exmap_sem);
820 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
822 unsigned long mapsize;
823 struct exmap_tbl_entry *ent = &exmap_tbl[i];
827 mapadr = (void *)ent->vadr;
828 mapsize = 1 << (ent->order + PAGE_SHIFT);
829 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
832 up_write(&exmap_sem);
837 * returns physical address, and sets len to valid length
839 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
843 if (is_dsp_internal_mem(vadr)) {
845 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
846 return (unsigned long)vadr;
850 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
852 unsigned long mapsize;
853 struct exmap_tbl_entry *ent = &exmap_tbl[i];
857 mapadr = (void *)ent->vadr;
858 mapsize = 1 << (ent->order + PAGE_SHIFT);
859 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
860 *len = mapadr + mapsize - vadr;
861 return __pa(ent->buf) + vadr - mapadr;
865 /* valid mapping not found */
872 #ifdef CONFIG_ARCH_OMAP1
873 static dsp_mmu_reg_t get_cam_l_va_mask(dsp_mmu_reg_t pgsz)
876 case DSP_MMU_CAM_PAGESIZE_1MB:
877 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
878 DSP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
879 case DSP_MMU_CAM_PAGESIZE_64KB:
880 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
881 DSP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
882 case DSP_MMU_CAM_PAGESIZE_4KB:
883 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
884 DSP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
885 case DSP_MMU_CAM_PAGESIZE_1KB:
886 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
887 DSP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
891 #endif /* CONFIG_ARCH_OMAP1 */
893 #if defined(CONFIG_ARCH_OMAP1)
894 #define get_cam_va_mask(pgsz) \
895 ((u32)DSP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
896 (u32)get_cam_l_va_mask(pgsz) << 6)
897 #elif defined(CONFIG_ARCH_OMAP2)
898 #define get_cam_va_mask(pgsz) \
899 ((pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
900 (pgsz == DSP_MMU_CAM_PAGESIZE_1MB) ? 0xfff00000 : \
901 (pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
902 (pgsz == DSP_MMU_CAM_PAGESIZE_4KB) ? 0xfffff000 : 0)
903 #endif /* CONFIG_ARCH_OMAP2 */
905 static void get_tlb_lock(struct tlb_lock *tlb_lock)
907 dsp_mmu_reg_t lock = dsp_mmu_read_reg(DSP_MMU_LOCK);
909 tlb_lock->base = (lock & DSP_MMU_LOCK_BASE_MASK) >>
910 DSP_MMU_LOCK_BASE_SHIFT;
911 tlb_lock->victim = (lock & DSP_MMU_LOCK_VICTIM_MASK) >>
912 DSP_MMU_LOCK_VICTIM_SHIFT;
915 static void set_tlb_lock(struct tlb_lock *tlb_lock)
917 dsp_mmu_write_reg((tlb_lock->base << DSP_MMU_LOCK_BASE_SHIFT) |
918 (tlb_lock->victim << DSP_MMU_LOCK_VICTIM_SHIFT),
/* read the TLB entry selected by tlb_lock->victim into *cr */
static void __read_tlb(struct tlb_lock *tlb_lock, struct cam_ram_regset *cr)
{
	/* select the entry to read */
	set_tlb_lock(tlb_lock);

#if defined(CONFIG_ARCH_OMAP1)
	/* read a TLB entry */
	dsp_mmu_write_reg(DSP_MMU_LD_TLB_RD, DSP_MMU_LD_TLB);

	cr->cam_h = dsp_mmu_read_reg(DSP_MMU_READ_CAM_H);
	cr->cam_l = dsp_mmu_read_reg(DSP_MMU_READ_CAM_L);
	cr->ram_h = dsp_mmu_read_reg(DSP_MMU_READ_RAM_H);
	cr->ram_l = dsp_mmu_read_reg(DSP_MMU_READ_RAM_L);
#elif defined(CONFIG_ARCH_OMAP2)
	cr->cam = dsp_mmu_read_reg(DSP_MMU_READ_CAM);
	cr->ram = dsp_mmu_read_reg(DSP_MMU_READ_RAM);
#endif
}
941 static void __load_tlb(struct cam_ram_regset *cr)
943 #if defined(CONFIG_ARCH_OMAP1)
944 dsp_mmu_write_reg(cr->cam_h, DSP_MMU_CAM_H);
945 dsp_mmu_write_reg(cr->cam_l, DSP_MMU_CAM_L);
946 dsp_mmu_write_reg(cr->ram_h, DSP_MMU_RAM_H);
947 dsp_mmu_write_reg(cr->ram_l, DSP_MMU_RAM_L);
948 #elif defined(CONFIG_ARCH_OMAP2)
949 dsp_mmu_write_reg(cr->cam | DSP_MMU_CAM_V, DSP_MMU_CAM);
950 dsp_mmu_write_reg(cr->ram, DSP_MMU_RAM);
953 /* flush the entry */
956 /* load a TLB entry */
957 dsp_mmu_write_reg(DSP_MMU_LD_TLB_LD, DSP_MMU_LD_TLB);
960 static int dsp_mmu_load_tlb(struct tlb_entry *tlb_ent)
962 struct tlb_lock tlb_lock;
963 struct cam_ram_regset cr;
965 #ifdef CONFIG_ARCH_OMAP1
966 clk_enable(dsp_ck_handle);
967 omap_dsp_request_mem();
970 get_tlb_lock(&tlb_lock);
971 for (tlb_lock.victim = 0;
972 tlb_lock.victim < tlb_lock.base;
974 struct cam_ram_regset tmp_cr;
976 /* read a TLB entry */
977 __read_tlb(&tlb_lock, &tmp_cr);
978 if (!cam_ram_valid(tmp_cr))
981 set_tlb_lock(&tlb_lock);
984 /* The last (31st) entry cannot be locked? */
985 if (tlb_lock.victim == 31) {
986 printk(KERN_ERR "omapdsp: TLB is full.\n");
990 if (tlb_ent->va & ~get_cam_va_mask(tlb_ent->pgsz)) {
992 "omapdsp: mapping vadr (0x%06x) is not "
993 "aligned boundary\n", tlb_ent->va);
997 #if defined(CONFIG_ARCH_OMAP1)
998 cr.cam_h = tlb_ent->va >> 22;
999 cr.cam_l = (tlb_ent->va >> 6 & get_cam_l_va_mask(tlb_ent->pgsz)) |
1000 tlb_ent->prsvd | tlb_ent->pgsz;
1001 cr.ram_h = tlb_ent->pa >> 16;
1002 cr.ram_l = (tlb_ent->pa & DSP_MMU_RAM_L_RAM_LSB_MASK) | tlb_ent->ap;
1003 #elif defined(CONFIG_ARCH_OMAP2)
1004 cr.cam = (tlb_ent->va & DSP_MMU_CAM_VATAG_MASK) |
1005 tlb_ent->prsvd | tlb_ent->pgsz;
1006 cr.ram = tlb_ent->pa | tlb_ent->endian | tlb_ent->elsz;
1010 /* update lock base */
1011 if (tlb_lock.victim == tlb_lock.base)
1013 tlb_lock.victim = tlb_lock.base;
1014 set_tlb_lock(&tlb_lock);
1016 #ifdef CONFIG_ARCH_OMAP1
1017 omap_dsp_release_mem();
1018 clk_disable(dsp_ck_handle);
1023 static int dsp_mmu_clear_tlb(dsp_long_t vadr)
1025 struct tlb_lock tlb_lock;
1029 #ifdef CONFIG_ARCH_OMAP1
1030 clk_enable(dsp_ck_handle);
1031 omap_dsp_request_mem();
1034 get_tlb_lock(&tlb_lock);
1035 for (i = 0; i < tlb_lock.base; i++) {
1036 struct cam_ram_regset cr;
1040 /* read a TLB entry */
1041 tlb_lock.victim = i;
1042 __read_tlb(&tlb_lock, &cr);
1043 if (!cam_ram_valid(cr))
1046 #if defined(CONFIG_ARCH_OMAP1)
1047 pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
1048 cam_va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
1049 (u32)(cr.cam_l & get_cam_l_va_mask(pgsz)) << 6;
1050 #elif defined(CONFIG_ARCH_OMAP2)
1051 pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
1052 cam_va = cr.cam & get_cam_va_mask(pgsz);
1056 /* flush the entry */
1062 /* set new lock base */
1063 tlb_lock.base = max_valid + 1;
1064 tlb_lock.victim = max_valid + 1;
1065 set_tlb_lock(&tlb_lock);
1067 #ifdef CONFIG_ARCH_OMAP1
1068 omap_dsp_release_mem();
1069 clk_disable(dsp_ck_handle);
1074 static void dsp_mmu_gflush(void)
1076 struct tlb_lock tlb_lock;
1078 #ifdef CONFIG_ARCH_OMAP1
1079 clk_enable(dsp_ck_handle);
1080 omap_dsp_request_mem();
1084 tlb_lock.base = exmap_preserved_cnt;
1085 tlb_lock.victim = exmap_preserved_cnt;
1086 set_tlb_lock(&tlb_lock);
1088 #ifdef CONFIG_ARCH_OMAP1
1089 omap_dsp_release_mem();
1090 clk_disable(dsp_ck_handle);
1097 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
1098 * In this case, the buffer for DSP is allocated in this routine,
1099 * then it is mapped.
1100 * On the other hand, for example - frame buffer sharing, calls
1101 * this function with padr set. It means some known address space
1102 * pointed with padr is going to be shared with DSP.
1104 static int dsp_exmap(dsp_long_t dspadr, unsigned long padr, unsigned long size,
1105 enum exmap_type_e type)
1109 unsigned int order = 0;
1112 dsp_long_t _dspadr = dspadr;
1113 unsigned long _padr = padr;
1114 void *_vadr = dspbyte_to_virt(dspadr);
1115 unsigned long _size = size;
1116 struct tlb_entry tlb_ent;
1117 struct exmap_tbl_entry *exmap_ent;
1122 #define MINIMUM_PAGESZ SZ_4KB
1126 if (!is_aligned(size, MINIMUM_PAGESZ)) {
1128 "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
1131 if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
1133 "omapdsp: DSP address(0x%x) is not aligned.\n", dspadr);
1136 if (!is_aligned(padr, MINIMUM_PAGESZ)) {
1138 "omapdsp: physical address(0x%lx) is not aligned.\n",
1143 /* address validity check */
1144 if ((dspadr < dspmem_size) ||
1145 (dspadr >= DSPSPACE_SIZE) ||
1146 ((dspadr + size > DSP_INIT_PAGE) &&
1147 (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
1149 "omapdsp: illegal address/size for dsp_exmap().\n");
1153 down_write(&exmap_sem);
1156 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1157 unsigned long mapsize;
1158 struct exmap_tbl_entry *tmp_ent = &exmap_tbl[i];
1160 if (!tmp_ent->valid)
1162 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
1163 if ((_vadr + size > tmp_ent->vadr) &&
1164 (_vadr < tmp_ent->vadr + mapsize)) {
1165 printk(KERN_ERR "omapdsp: exmap page overlap!\n");
1166 up_write(&exmap_sem);
1173 /* Are there any free TLB lines? */
1174 for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1175 if (!exmap_tbl[idx].valid)
1178 printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
1183 exmap_ent = &exmap_tbl[idx];
1187 * 1KB mapping in OMAP1,
1188 * 16MB mapping in OMAP2.
1190 if ((_size >= SZ_1MB) &&
1191 (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
1192 is_aligned(_dspadr, SZ_1MB)) {
1194 pgsz = DSP_MMU_CAM_PAGESIZE_1MB;
1195 } else if ((_size >= SZ_64KB) &&
1196 (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
1197 is_aligned(_dspadr, SZ_64KB)) {
1199 pgsz = DSP_MMU_CAM_PAGESIZE_64KB;
1202 pgsz = DSP_MMU_CAM_PAGESIZE_4KB;
1205 order = get_order(unit);
1207 /* buffer allocation */
1208 if (type == EXMAP_TYPE_MEM) {
1209 struct page *page, *ps, *pe;
1211 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
1212 buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
1213 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
1214 buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
1216 buf = (void *)__get_dma_pages(GFP_KERNEL, order);
1223 /* mark the pages as reserved; this is needed for mmap */
1224 ps = virt_to_page(buf);
1225 pe = virt_to_page(buf + unit);
1227 for (page = ps; page < pe; page++)
1228 SetPageReserved(page);
1234 * mapping for ARM MMU:
1235 * we should not access to the allocated memory through 'buf'
1236 * since this area should not be cashed.
1238 status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
1242 /* loading DSP TLB entry */
1243 INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
1244 status = dsp_mmu_load_tlb(&tlb_ent);
1246 exmap_clear_armmmu((unsigned long)_vadr, unit);
1250 INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
1251 exmap_ent->link.prev = prev;
1253 exmap_tbl[prev].link.next = idx;
1255 if ((_size -= unit) == 0) { /* normal completion */
1256 up_write(&exmap_sem);
1262 _padr = padr ? _padr + unit : 0;
1267 up_write(&exmap_sem);
1269 dsp_mem_free_pages((unsigned long)buf, order);
1270 dsp_exunmap(dspadr);
1274 static unsigned long unmap_free_arm(struct exmap_tbl_entry *ent)
1278 /* clearing ARM MMU */
1279 size = 1 << (ent->order + PAGE_SHIFT);
1280 exmap_clear_armmmu((unsigned long)ent->vadr, size);
1282 /* freeing allocated memory */
1283 if (ent->type == EXMAP_TYPE_MEM) {
1284 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1286 "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1289 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1290 else if (ent->type == EXMAP_TYPE_FB) {
1293 status = omapfb_unregister_client(omapfb_nb);
1295 printk("omapfb_unregister_client(): "
1298 printk("omapfb_runegister_client(): "
1299 "failure(%d)\n", status);
1310 static int dsp_exunmap(dsp_long_t dspadr)
1315 struct exmap_tbl_entry *ent;
1318 vadr = dspbyte_to_virt(dspadr);
1319 down_write(&exmap_sem);
1320 for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1321 ent = &exmap_tbl[idx];
1322 if ((!ent->valid) || ent->prsvd)
1324 if (ent->vadr == vadr)
1327 up_write(&exmap_sem);
1329 "omapdsp: address %06x not found in exmap_tbl.\n", dspadr);
1333 if (ent->usecount > 0) {
1335 "omapdsp: exmap reference count is not 0.\n"
1336 " idx=%d, vadr=%p, order=%d, usecount=%d\n",
1337 idx, ent->vadr, ent->order, ent->usecount);
1338 up_write(&exmap_sem);
1341 /* clearing DSP TLB entry */
1342 dsp_mmu_clear_tlb(dspadr);
1344 /* clear ARM MMU and free buffer */
1345 size = unmap_free_arm(ent);
1349 /* we don't free PTEs */
1352 flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
1354 if ((idx = ent->link.next) < 0)
1355 goto up_out; /* normal completion */
1356 ent = &exmap_tbl[idx];
1359 if (ent->vadr == vadr)
1360 goto found_map; /* continue */
1363 "omapdsp: illegal exmap_tbl grouping!\n"
1364 "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1365 vadr, idx, ent->vadr);
1366 up_write(&exmap_sem);
1370 up_write(&exmap_sem);
1374 static void exmap_flush(void)
1376 struct exmap_tbl_entry *ent;
1379 down_write(&exmap_sem);
1381 /* clearing DSP TLB entry */
1384 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1385 ent = &exmap_tbl[i];
1386 if (ent->valid && (!ent->prsvd)) {
1387 unmap_free_arm(ent);
1393 flush_tlb_kernel_range(dspmem_base + dspmem_size,
1394 dspmem_base + DSPSPACE_SIZE);
1395 up_write(&exmap_sem);
1398 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1400 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1401 #endif /* CONFIG_FB */
1403 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1404 static int omapfb_notifier_cb(struct notifier_block *omapfb_nb,
1405 unsigned long event, void *fbi)
1408 printk("omapfb_notifier_cb(): event = %s\n",
1409 (event == OMAPFB_EVENT_READY) ? "READY" :
1410 (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1411 if (event == OMAPFB_EVENT_READY)
1413 else if (event == OMAPFB_EVENT_DISABLED)
1419 static int dsp_fbexport(dsp_long_t *dspadr)
1421 dsp_long_t dspadr_actual;
1422 unsigned long padr_sys, padr, fbsz_sys, fbsz;
1424 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1428 printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1430 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1433 "omapdsp: frame buffer has been exported already!\n");
1438 if (num_registered_fb == 0) {
1439 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1442 if (num_registered_fb != 1) {
1444 "omapdsp: %d frame buffers found. we use first one.\n",
1447 padr_sys = registered_fb[0]->fix.smem_start;
1448 fbsz_sys = registered_fb[0]->fix.smem_len;
1449 if (fbsz_sys == 0) {
1451 "omapdsp: framebuffer doesn't seem to be configured "
1452 "correctly! (size=0)\n");
1457 * align padr and fbsz to 4kB boundary
1458 * (should be noted to the user afterwards!)
1460 padr = padr_sys & ~(SZ_4KB-1);
1461 fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1463 /* line up dspadr offset with padr */
1465 (fbsz > SZ_1MB) ? lineup_offset(*dspadr, padr, SZ_1MB-1) :
1466 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1467 /* (fbsz > SZ_4KB) ? */ *dspadr;
1468 if (dspadr_actual != *dspadr)
1470 "omapdsp: actual dspadr for FBEXPORT = %08x\n",
1472 *dspadr = dspadr_actual;
1474 cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1476 printk(KERN_ERR "omapdsp: exmap failure.\n");
1480 if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1482 " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1483 " !! screen base address or size is not aligned in 4kB: !!\n"
1484 " !! actual screen adr = %08lx, size = %08lx !!\n"
1485 " !! exporting adr = %08lx, size = %08lx !!\n"
1486 " !! Make sure that the framebuffer is allocated with 4kB-order! !!\n"
1487 " !! Otherwise DSP can corrupt the kernel memory. !!\n"
1488 " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1489 padr_sys, fbsz_sys, padr, fbsz);
1492 #ifdef CONFIG_ARCH_OMAP1
1493 /* increase the DMA priority */
1494 set_emiff_dma_prio(15);
1497 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1498 omapfb_nb = kmalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1499 if (omapfb_nb == NULL) {
1501 "omapdsp: failed to allocate memory for omapfb_nb!\n");
1502 dsp_exunmap(dspadr_actual);
1505 status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1507 printk("omapfb_register_client(): success\n");
1509 printk("omapfb_register_client(): failure(%d)\n", status);
1515 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1517 static int dsp_fbexport(dsp_long_t *dspadr)
1519 printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1523 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1525 static void exmap_setup_preserved_mem_page(void *buf, dsp_long_t dspadr,
1530 struct tlb_entry tlb_ent;
1533 virt = dspbyte_to_virt(dspadr);
1534 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1535 INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], buf, virt);
1536 INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1537 dsp_mmu_load_tlb(&tlb_ent);
1540 static void exmap_clear_mem_page(dsp_long_t dspadr)
1544 virt = dspbyte_to_virt(dspadr);
1545 exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1546 /* DSP MMU is shutting down. not handled here. */
1549 #ifdef CONFIG_ARCH_OMAP2
/*
 * OMAP2 only: map one L4 peripheral page (phys) into the DSP I/O window.
 * DSP byte address = (IOMAP_VAL << 18) + (dsp_io_adr << 1); the << 1 is
 * presumably the DSP word->byte address conversion -- confirm.  Entry is
 * preserved (locked) with 32-bit element size.
 */
1550 static void exmap_setup_iomap_page(unsigned long phys, unsigned long dsp_io_adr,
1555 struct tlb_entry tlb_ent;
1557 dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1558 virt = dspbyte_to_virt(dspadr);
1559 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1560 INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], NULL, virt);
1561 INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
1562 dsp_mmu_load_tlb(&tlb_ent);
/* OMAP2 only: undo exmap_setup_iomap_page() on the ARM side; the DSP
 * TLB entry is left to the MMU shutdown path.  Address computation must
 * stay in sync with exmap_setup_iomap_page(). */
1565 static void exmap_clear_iomap_page(unsigned long dsp_io_adr)
1570 dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1571 virt = dspbyte_to_virt(dspadr);
1572 exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1573 /* DSP MMU is shutting down. not handled here. */
1575 #endif /* CONFIG_ARCH_OMAP2 */
/* OMAP2 L4 peripheral physical base addresses that are mapped into the
 * DSP I/O space by exmap_setup_preserved_entries() below. */
1577 #define OMAP2420_GPT5_BASE (L4_24XX_BASE + 0x7c000)
1578 #define OMAP2420_GPT6_BASE (L4_24XX_BASE + 0x7e000)
1579 #define OMAP2420_GPT7_BASE (L4_24XX_BASE + 0x80000)
1580 #define OMAP2420_GPT8_BASE (L4_24XX_BASE + 0x82000)
1581 #define OMAP24XX_EAC_BASE (L4_24XX_BASE + 0x90000)
/*
 * Install all preserved (locked) TLB entries: the DSP vector page, and
 * on OMAP2 the I/O pages for PRCM, GPT5-8 (2420 only), EAC and MAILBOX.
 * Presumably returns the entry count 'n' (the return is elided in this
 * listing); the caller stores it in exmap_preserved_cnt -- confirm.
 */
1583 static int exmap_setup_preserved_entries(void)
1587 exmap_setup_preserved_mem_page(dspvect_page, DSP_INIT_PAGE, n++);
1588 #ifdef CONFIG_ARCH_OMAP2
1589 exmap_setup_iomap_page(OMAP24XX_PRCM_BASE, 0x7000, n++);
1590 #ifdef CONFIG_ARCH_OMAP2420
1591 exmap_setup_iomap_page(OMAP2420_GPT5_BASE, 0xe000, n++);
1592 exmap_setup_iomap_page(OMAP2420_GPT6_BASE, 0xe800, n++);
1593 exmap_setup_iomap_page(OMAP2420_GPT7_BASE, 0xf000, n++);
1594 exmap_setup_iomap_page(OMAP2420_GPT8_BASE, 0xf800, n++);
1595 #endif /* CONFIG_ARCH_OMAP2420 */
1596 exmap_setup_iomap_page(OMAP24XX_EAC_BASE, 0x10000, n++);
1597 exmap_setup_iomap_page(OMAP24XX_MAILBOX_BASE, 0x11000, n++);
1598 #endif /* CONFIG_ARCH_OMAP2 */
/*
 * Mirror of exmap_setup_preserved_entries(): clear the ARM-side mappings
 * of every preserved entry.  The dsp_io_adr values here must match the
 * ones passed at setup time.
 */
1603 static void exmap_clear_preserved_entries(void)
1605 exmap_clear_mem_page(DSP_INIT_PAGE);
1606 #ifdef CONFIG_ARCH_OMAP2
1607 exmap_clear_iomap_page(0x7000); /* PRCM */
1608 #ifdef CONFIG_ARCH_OMAP2420
1609 exmap_clear_iomap_page(0xe000); /* GPT5 */
1610 exmap_clear_iomap_page(0xe800); /* GPT6 */
1611 exmap_clear_iomap_page(0xf000); /* GPT7 */
1612 exmap_clear_iomap_page(0xf800); /* GPT8 */
1613 #endif /* CONFIG_ARCH_OMAP2420 */
1614 exmap_clear_iomap_page(0x10000); /* EAC */
1615 exmap_clear_iomap_page(0x11000); /* MAILBOX */
1616 #endif /* CONFIG_ARCH_OMAP2 */
1619 #ifdef CONFIG_ARCH_OMAP1
/*
 * OMAP1 only: acknowledge a DSP MMU fault so the DSP can proceed.
 * Requires ERRCODE_MMU to be set.  Temporarily maps a page covering the
 * fault address, drops to the recovery runlevel, then unmaps and clears
 * the error.  (Error-path return between lines 1626/1629 is elided.)
 */
1620 static int dsp_mmu_itack(void)
1622 unsigned long dspadr;
1624 printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1625 if (!dsp_err_isset(ERRCODE_MMU)) {
1626 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1629 dspadr = dsp_fault_adr & ~(SZ_4K-1);
1630 dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM); /* FIXME: reserve TLB entry for this */
1631 printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1632 dsp_set_runlevel(RUNLEVEL_RECOVERY);
1635 dsp_exunmap(dspadr);
1636 dsp_err_clear(ERRCODE_MMU);
1639 #endif /* CONFIG_ARCH_OMAP1 */
1641 #ifdef CONFIG_ARCH_OMAP2
/* OMAP2: DSP MMU interrupt causes we enable -- written to
 * DSP_MMU_IRQENABLE in dsp_mmu_init() and used by do_mmu_int() to tell
 * enabled faults from merely-reported ones. */
1642 #define MMU_IRQ_MASK \
1643 (DSP_MMU_IRQ_MULTIHITFAULT | \
1644 DSP_MMU_IRQ_TABLEWALKFAULT | \
1645 DSP_MMU_IRQ_EMUMISS | \
1646 DSP_MMU_IRQ_TRANSLATIONFAULT | \
1647 DSP_MMU_IRQ_TLBMISS)
/*
 * (Re)initialize the DSP MMU under exmap_sem (writer side): reset the
 * TLB, reload the preserved entries, and on OMAP2 program the IRQ mask.
 * On OMAP1 the DSP clock/memory must be held across the register
 * accesses (clk_enable/omap_dsp_request_mem bracket).
 */
1650 static void dsp_mmu_init(void)
1652 struct tlb_lock tlb_lock;
1654 #ifdef CONFIG_ARCH_OMAP1
1655 clk_enable(dsp_ck_handle);
1656 omap_dsp_request_mem();
1658 down_write(&exmap_sem);
1660 #if defined(CONFIG_ARCH_OMAP1)
1661 dsp_mmu_disable(); /* clear all */
1663 #elif defined(CONFIG_ARCH_OMAP2)
1668 /* DSP TLB initialization */
1670 tlb_lock.victim = 0;
1671 set_tlb_lock(&tlb_lock);
1673 exmap_preserved_cnt = exmap_setup_preserved_entries();
1675 #ifdef CONFIG_ARCH_OMAP2
1676 /* MMU IRQ mask setup */
1677 dsp_mmu_write_reg(MMU_IRQ_MASK, DSP_MMU_IRQENABLE);
1680 up_write(&exmap_sem);
1681 #ifdef CONFIG_ARCH_OMAP1
1682 omap_dsp_release_mem();
1683 clk_disable(dsp_ck_handle);
/* Shut down the DSP MMU; releases the preserved ARM-side mappings.
 * (Further body lines are elided in this listing.) */
1687 static void dsp_mmu_shutdown(void)
1690 exmap_clear_preserved_entries();
1694 #ifdef CONFIG_ARCH_OMAP1
1696 * intmem_enable() / disable():
1697 * if the address is in DSP internal memories,
1698 * we send PM mailbox commands so that the DSP DMA domain won't go idle
1699 * while the ARM is accessing those memories.
/* Ask the DSP (PM mailbox command) to keep the DMA domain awake while
 * the ARM touches DSP internal memory.  Only sent once the DSP
 * configuration is up (CFGSTAT_READY). */
1701 static int intmem_enable(void)
1705 if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1706 ret = mbcompose_send(PM, PM_ENABLE, DSPREG_ICR_DMA);
1711 static void intmem_disable(void) {
1712 if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1713 mbcompose_send(PM, PM_DISABLE, DSPREG_ICR_DMA);
1715 #endif /* CONFIG_ARCH_OMAP1 */
1718 * dsp_mem_enable() / disable()
1720 #ifdef CONFIG_ARCH_OMAP1
/* OMAP1: nesting count of outstanding internal-memory enables; first
 * enable requests the memory, last disable releases it. */
1721 int intmem_usecount;
/*
 * Grab access to DSP memory at 'adr'.  Internal memory additionally
 * requests the OMAP1 memory interface on first use; all paths take
 * exmap_sem as reader.  Pair every call with dsp_mem_disable().
 */
1724 int dsp_mem_enable(void *adr)
1728 if (is_dsp_internal_mem(adr)) {
1729 #ifdef CONFIG_ARCH_OMAP1
1730 if (intmem_usecount++ == 0)
1731 ret = omap_dsp_request_mem();
1734 down_read(&exmap_sem);
/* Release what dsp_mem_enable() took: drop the internal-memory request
 * when the nesting count hits zero, and release exmap_sem (reader). */
1739 void dsp_mem_disable(void *adr)
1741 if (is_dsp_internal_mem(adr)) {
1742 #ifdef CONFIG_ARCH_OMAP1
1743 if (--intmem_usecount == 0)
1744 omap_dsp_release_mem();
1747 up_read(&exmap_sem);
1751 #ifdef CONFIG_ARCH_OMAP1
/* OMAP1: recovery helper -- force the internal-memory usecount back to
 * zero (with a warning) if enable/disable calls got unbalanced. */
1752 void dsp_mem_usecount_clear(void)
1754 if (intmem_usecount != 0) {
1756 "omapdsp: unbalanced memory request/release detected.\n"
1757 " intmem_usecount is not zero at where "
1758 "it should be! ... fixed to be zero.\n");
1759 intmem_usecount = 0;
1760 omap_dsp_release_mem();
1763 #endif /* CONFIG_ARCH_OMAP1 */
1766 * dsp_mem file operations
/* llseek for the dsp_mem device: SEEK_SET / SEEK_CUR under the inode
 * mutex.  (Orig-value checks and the ENOENT branches are elided in this
 * listing.) */
1768 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1772 mutex_lock(&file->f_dentry->d_inode->i_mutex);
1775 file->f_pos = offset;
1779 file->f_pos += offset;
1785 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
/*
 * Read from DSP internal memory at *ppos, clamped to dspmem_size.
 * OMAP1 needs the API clock enabled around the access.
 * NOTE(review): 'read' is computed in a line elided from this listing --
 * presumably min(count, size - p); confirm against full source.
 */
1789 static ssize_t intmem_read(struct file *file, char __user *buf, size_t count,
1792 unsigned long p = *ppos;
1793 void *vadr = dspbyte_to_virt(p);
1794 ssize_t size = dspmem_size;
1799 #ifdef CONFIG_ARCH_OMAP1
1800 clk_enable(api_ck_handle);
1803 if (count > size - p)
1805 if (copy_to_user(buf, vadr, read)) {
1811 #ifdef CONFIG_ARCH_OMAP1
1812 clk_disable(api_ck_handle);
/* Read from exmap'ed (external) DSP memory: validate that the whole
 * [*ppos, *ppos+count) range is currently mapped, clamp to the DSP
 * address space, then copy out. */
1817 static ssize_t exmem_read(struct file *file, char __user *buf, size_t count,
1820 unsigned long p = *ppos;
1821 void *vadr = dspbyte_to_virt(p);
1823 if (!exmap_valid(vadr, count)) {
1825 "omapdsp: DSP address %08lx / size %08x "
1826 "is not valid!\n", p, count);
1829 if (count > DSPSPACE_SIZE - p)
1830 count = DSPSPACE_SIZE - p;
1831 if (copy_to_user(buf, vadr, count))
/* read() entry point: take the memory enable (clock/semaphore), then
 * dispatch to intmem_read() or exmem_read() depending on whether *ppos
 * falls in DSP internal memory. */
1838 static ssize_t dsp_mem_read(struct file *file, char __user *buf, size_t count,
1842 void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1844 if (dsp_mem_enable(vadr) < 0)
1846 if (is_dspbyte_internal_mem(*ppos))
1847 ret = intmem_read(file, buf, count, ppos);
1849 ret = exmem_read(file, buf, count, ppos);
1850 dsp_mem_disable(vadr);
/*
 * Write to DSP internal memory at *ppos, clamped to dspmem_size;
 * mirror of intmem_read().  OMAP1 needs the API clock held.
 * NOTE(review): 'written' is computed in an elided line -- presumably
 * min(count, size - p); confirm against full source.
 */
1855 static ssize_t intmem_write(struct file *file, const char __user *buf,
1856 size_t count, loff_t *ppos)
1858 unsigned long p = *ppos;
1859 void *vadr = dspbyte_to_virt(p);
1860 ssize_t size = dspmem_size;
1865 #ifdef CONFIG_ARCH_OMAP1
1866 clk_enable(api_ck_handle);
1869 if (count > size - p)
1871 if (copy_from_user(vadr, buf, written)) {
1877 #ifdef CONFIG_ARCH_OMAP1
1878 clk_disable(api_ck_handle);
/* Write to exmap'ed (external) DSP memory; mirror of exmem_read():
 * validate the mapped range, clamp to the DSP address space, copy in. */
1883 static ssize_t exmem_write(struct file *file, const char __user *buf,
1884 size_t count, loff_t *ppos)
1886 unsigned long p = *ppos;
1887 void *vadr = dspbyte_to_virt(p);
1889 if (!exmap_valid(vadr, count)) {
1891 "omapdsp: DSP address %08lx / size %08x "
1892 "is not valid!\n", p, count);
1895 if (count > DSPSPACE_SIZE - p)
1896 count = DSPSPACE_SIZE - p;
1897 if (copy_from_user(vadr, buf, count))
/* write() entry point: same enable/dispatch/disable pattern as
 * dsp_mem_read(), routed to intmem_write()/exmem_write(). */
1904 static ssize_t dsp_mem_write(struct file *file, const char __user *buf,
1905 size_t count, loff_t *ppos)
1908 void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1910 if (dsp_mem_enable(vadr) < 0)
1912 if (is_dspbyte_internal_mem(*ppos))
1913 ret = intmem_write(file, buf, count, ppos);
1915 ret = exmem_write(file, buf, count, ppos);
1916 dsp_mem_disable(vadr);
/*
 * ioctl dispatcher for the dsp_mem device: MMU init/flush, external
 * memory map/unmap (EXMAP/EXUNMAP), framebuffer export, OMAP1 MMU fault
 * ack, and kernel-memory pool reserve/release.  The copy_from_user /
 * copy_to_user error returns (-EFAULT) sit on lines elided here.
 */
1921 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1922 unsigned int cmd, unsigned long arg)
1925 case MEM_IOCTL_MMUINIT:
1929 case MEM_IOCTL_EXMAP:
1931 struct omap_dsp_mapinfo mapinfo;
1932 if (copy_from_user(&mapinfo, (void __user *)arg,
1935 return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1939 case MEM_IOCTL_EXUNMAP:
1940 return dsp_exunmap((unsigned long)arg);
1942 case MEM_IOCTL_EXMAP_FLUSH:
1946 case MEM_IOCTL_FBEXPORT:
1950 if (copy_from_user(&dspadr, (void __user *)arg,
1951 sizeof(dsp_long_t)))
1953 ret = dsp_fbexport(&dspadr);
1954 if (copy_to_user((void __user *)arg, &dspadr,
1955 sizeof(dsp_long_t)))
1960 #ifdef CONFIG_ARCH_OMAP1
1961 case MEM_IOCTL_MMUITACK:
1962 return dsp_mmu_itack();
1965 case MEM_IOCTL_KMEM_RESERVE:
1968 if (copy_from_user(&size, (void __user *)arg,
1971 return dsp_kmem_reserve(size);
1974 case MEM_IOCTL_KMEM_RELEASE:
1979 return -ENOIOCTLCMD;
/* mmap() entry point for the dsp_mem device (body elided in this listing). */
1983 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
/* open(): raw DSP memory access is privileged -- require CAP_SYS_RAWIO. */
1991 static int dsp_mem_open(struct inode *inode, struct file *file)
1993 if (!capable(CAP_SYS_RAWIO))
1999 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
2001 * fb update functions:
2002 * fbupd_response() is executed by the workqueue.
2003 * fbupd_cb() is called when fb update is done, in interrupt context.
2004 * mbox_fbupd() is called when KFUNC:FBCTL:UPD is received from DSP.
/* Workqueue body: tell the DSP (KFUNC:FBCTL:UPD) that the framebuffer
 * update completed; logs if the mailbox send fails because DSP is busy. */
2006 static void fbupd_response(void *arg)
2010 status = mbcompose_send(KFUNC, KFUNC_FBCTL, FBCTL_UPD);
2012 /* FIXME: DSP is busy !! */
2014 "omapdsp: DSP is busy when trying to send FBCTL:UPD "
/* Work item so the mailbox send runs in process context, not in the
 * fb-completion interrupt context. */
2019 static DECLARE_WORK(fbupd_response_work, (void (*)(void *))fbupd_response,
/* fb-update completion callback (interrupt context): defer the DSP
 * notification to the workqueue. */
2022 static void fbupd_cb(void *arg)
2024 schedule_work(&fbupd_response_work);
/*
 * Handle KFUNC:FBCTL:UPD from the DSP: read the update-window parameters
 * out of the system IPBUF (after syncing with the DSP), release the
 * buffer, and kick an async fb update with fbupd_cb as completion.
 * NOTE(review): win.x/y/width are filled from buf[0..2] on lines elided
 * in this listing -- confirm indices against full source.
 */
2027 void mbox_fbctl_upd(void)
2029 struct omapfb_update_window win;
2030 volatile unsigned short *buf = ipbuf_sys_da->d;
2032 /* FIXME: try count sometimes exceeds 1000. */
2033 if (sync_with_dsp(&ipbuf_sys_da->s, TID_ANON, 5000) < 0) {
2034 printk(KERN_ERR "mbox: FBCTL:UPD - IPBUF sync failed!\n");
2040 win.height = buf[3];
2041 win.format = buf[4];
2042 release_ipbuf_pvt(ipbuf_sys_da);
2044 if (!omapfb_ready) {
2046 "omapdsp: fbupd() called while HWA742 is not ready!\n");
2049 //printk("calling omapfb_update_window_async()\n");
2050 omapfb_update_window_async(registered_fb[1], &win, fbupd_cb, NULL);
2053 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
/* Empty stub when no external LCD controller is configured. */
2055 void mbox_fbctl_upd(void)
2058 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
/*
 * sysfs 'mmu' attribute: dump every DSP MMU TLB entry.  Walks all
 * DSP_MMU_TLB_LINES by pointing the victim register at each line,
 * reading the CAM/RAM register pair, and decoding it per architecture
 * (OMAP1: page size + access permission; OMAP2: page size, endianness,
 * element size, mixed attribute).  The original lock/victim state is
 * restored afterwards.  Runs under exmap_sem (reader); OMAP1 needs the
 * DSP clock and memory interface held around register access.
 */
2065 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
2069 struct tlb_lock tlb_lock_org;
2072 #ifdef CONFIG_ARCH_OMAP1
2073 clk_enable(dsp_ck_handle);
2074 omap_dsp_request_mem();
2076 down_read(&exmap_sem);
2078 get_tlb_lock(&tlb_lock_org);
2080 #if defined(CONFIG_ARCH_OMAP1)
2081 len = sprintf(buf, "P: preserved, V: valid\n"
2082 "ety P V size cam_va ram_pa ap\n");
2083 /* 00: P V 4KB 0x300000 0x10171800 FA */
2084 #elif defined(CONFIG_ARCH_OMAP2)
2085 len = sprintf(buf, "P: preserved, V: valid\n"
2086 "B: big endian, L:little endian, "
2087 "M: mixed page attribute\n"
2088 "ety P V size cam_va ram_pa E ES M\n");
2089 /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
2092 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2093 struct cam_ram_regset cr;
2094 struct tlb_lock tlb_lock_tmp;
2095 struct tlb_entry ent;
2096 #if defined(CONFIG_ARCH_OMAP1)
2097 char *pgsz_str, *ap_str;
2098 #elif defined(CONFIG_ARCH_OMAP2)
2099 char *pgsz_str, *elsz_str;
2102 /* read a TLB entry */
2103 tlb_lock_tmp.base = tlb_lock_org.base;
2104 tlb_lock_tmp.victim = i;
2105 __read_tlb(&tlb_lock_tmp, &cr);
2107 #if defined(CONFIG_ARCH_OMAP1)
2108 ent.pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
2109 ent.prsvd = cr.cam_l & DSP_MMU_CAM_P;
2110 ent.valid = cr.cam_l & DSP_MMU_CAM_V;
2111 ent.ap = cr.ram_l & DSP_MMU_RAM_L_AP_MASK;
2112 ent.va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
2113 (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
2114 ent.pa = (unsigned long)cr.ram_h << 16 |
2115 (cr.ram_l & DSP_MMU_RAM_L_RAM_LSB_MASK);
2117 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
2118 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2119 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
2120 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1KB) ? " 1KB":
2122 ap_str = (ent.ap == DSP_MMU_RAM_L_AP_RO) ? "RO":
2123 (ent.ap == DSP_MMU_RAM_L_AP_FA) ? "FA":
2124 (ent.ap == DSP_MMU_RAM_L_AP_NA) ? "NA":
2126 #elif defined(CONFIG_ARCH_OMAP2)
2127 ent.pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
2128 ent.prsvd = cr.cam & DSP_MMU_CAM_P;
2129 ent.valid = cr.cam & DSP_MMU_CAM_V;
2130 ent.va = cr.cam & DSP_MMU_CAM_VATAG_MASK;
2131 ent.endian = cr.ram & DSP_MMU_RAM_ENDIANNESS;
2132 ent.elsz = cr.ram & DSP_MMU_RAM_ELEMENTSIZE_MASK;
2133 ent.pa = cr.ram & DSP_MMU_RAM_PADDR_MASK;
2134 ent.mixed = cr.ram & DSP_MMU_RAM_MIXED;
/* FIX: 16MB (super)section printed the wrong label "64MB". */
2136 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
2137 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
2138 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2139 (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
2141 elsz_str = (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_8) ? " 8":
2142 (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_16) ? "16":
2143 (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_32) ? "32":
2147 if (i == tlb_lock_org.base)
2148 len += sprintf(buf + len, "lock base = %d\n",
2150 if (i == tlb_lock_org.victim)
2151 len += sprintf(buf + len, "victim = %d\n",
2152 tlb_lock_org.victim);
2153 #if defined(CONFIG_ARCH_OMAP1)
2154 len += sprintf(buf + len,
2155 /* 00: P V 4KB 0x300000 0x10171800 FA */
2156 "%02d: %c %c %s 0x%06x 0x%08lx %s\n",
2158 ent.prsvd ? 'P' : ' ',
2159 ent.valid ? 'V' : ' ',
2160 pgsz_str, ent.va, ent.pa, ap_str);
2161 #elif defined(CONFIG_ARCH_OMAP2)
2162 len += sprintf(buf + len,
2163 /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
2164 "%02d: %c %c %s 0x%06x 0x%08lx %c %s %c\n",
2166 ent.prsvd ? 'P' : ' ',
2167 ent.valid ? 'V' : ' ',
2168 pgsz_str, ent.va, ent.pa,
2169 ent.endian ? 'B' : 'L',
2171 ent.mixed ? 'M' : ' ');
2172 #endif /* CONFIG_ARCH_OMAP2 */
2175 /* restore victim entry */
2176 set_tlb_lock(&tlb_lock_org);
2178 up_read(&exmap_sem);
2179 #ifdef CONFIG_ARCH_OMAP1
2180 omap_dsp_release_mem();
2181 clk_disable(dsp_ck_handle);
/*
 * sysfs 'exmap' attribute: dump the external mapping table.  For each
 * chain head (valid entry with no predecessor), print the DSP address
 * and total chained size, then one line per chained buffer with its
 * kernel address, size, and usecount.  Runs under exmap_sem (reader).
 */
2187 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
2193 down_read(&exmap_sem);
2194 len = sprintf(buf, " dspadr size buf size uc\n");
2195 /* 0x300000 0x123000 0xc0171000 0x100000 0*/
2196 for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2197 struct exmap_tbl_entry *ent = &exmap_tbl[i];
2200 enum exmap_type_e type;
2203 /* find a top of link */
2204 if (!ent->valid || (ent->link.prev >= 0))
/* first pass: accumulate the total size of the whole chain */
2212 ent = &exmap_tbl[idx];
2213 size += PAGE_SIZE << ent->order;
2214 } while ((idx = ent->link.next) >= 0);
2216 len += sprintf(buf + len, "0x%06x %#8lx",
2217 virt_to_dspbyte(vadr), size);
2219 if (type == EXMAP_TYPE_FB) {
2220 len += sprintf(buf + len, " framebuf\n");
2222 len += sprintf(buf + len, "\n");
/* second pass: one detail line per buffer in the chain */
2225 ent = &exmap_tbl[idx];
2226 len += sprintf(buf + len,
2227 /* 0xc0171000 0x100000 0*/
2228 "%19s0x%8p %#8lx %2d\n",
2230 PAGE_SIZE << ent->order,
2232 } while ((idx = ent->link.next) >= 0);
2236 up_read(&exmap_sem);
/*
 * sysfs 'mempool' attribute: report reserved kernel memory -- the 1MB
 * and 64KB mempool sizes (min_nr) and how many elements are currently
 * free (curr_nr), plus the total reserved byte count.
 */
2241 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
2244 int min_nr_1M = 0, curr_nr_1M = 0;
2245 int min_nr_64K = 0, curr_nr_64K = 0;
2248 if (likely(kmem_pool_1M)) {
2249 min_nr_1M = kmem_pool_1M->min_nr;
2250 curr_nr_1M = kmem_pool_1M->curr_nr;
2251 total += min_nr_1M * SZ_1MB;
2253 if (likely(kmem_pool_64K)) {
2254 min_nr_64K = kmem_pool_64K->min_nr;
2255 curr_nr_64K = kmem_pool_64K->curr_nr;
2256 total += min_nr_64K * SZ_64KB;
2261 "1M buffer: %d (%d free)\n"
2262 "64K buffer: %d (%d free)\n",
2263 total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
2267 * workqueue for mmu int
2269 #ifdef CONFIG_ARCH_OMAP1
2272 * We ignore prefetch err.
/* OMAP1: fault-status bits that do_mmu_int() acts on; prefetch errors
 * are deliberately not included. */
2274 #define MMUFAULT_MASK \
2275 (DSP_MMU_FAULT_ST_PERM |\
2276 DSP_MMU_FAULT_ST_TLB_MISS |\
2277 DSP_MMU_FAULT_ST_TRANS)
2278 #endif /* CONFIG_ARCH_OMAP1 */
/*
 * Workqueue body for the DSP MMU interrupt (IRQ stays disabled while we
 * run; re-enabled at the end / on the ignore path).  Reads the fault
 * status and fault address, logs which causes fired -- names in
 * parentheses are causes that are reported but not enabled in the mask
 * -- then records the error (ERRCODE_MMU) if the DSP is configured.
 * OMAP1 additionally resets the DSP; OMAP2 acks by writing the status
 * back to DSP_MMU_IRQSTATUS.
 */
2280 static void do_mmu_int(void)
2282 #if defined(CONFIG_ARCH_OMAP1)
2284 dsp_mmu_reg_t status;
2285 dsp_mmu_reg_t adh, adl;
2288 status = dsp_mmu_read_reg(DSP_MMU_FAULT_ST);
2289 adh = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_H);
2290 adl = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_L);
2291 dp = adh & DSP_MMU_FAULT_AD_H_DP;
2292 dsp_fault_adr = MK32(adh & DSP_MMU_FAULT_AD_H_ADR_MASK, adl);
2294 /* if the fault is masked, nothing to do */
2295 if ((status & MMUFAULT_MASK) == 0) {
2296 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
2298 * note: in OMAP1710,
2299 * when CACHE + DMA domain gets out of idle in DSP,
2300 * MMU interrupt occurs but DSP_MMU_FAULT_ST is not set.
2301 * in this case, we just ignore the interrupt.
2304 printk(KERN_DEBUG "%s%s%s%s\n",
2305 (status & DSP_MMU_FAULT_ST_PREF)?
2306 " (prefetch err)" : "",
2307 (status & DSP_MMU_FAULT_ST_PERM)?
2308 " (permission fault)" : "",
2309 (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2311 (status & DSP_MMU_FAULT_ST_TRANS) ?
2312 " (translation fault)": "");
2313 printk(KERN_DEBUG "fault address = %#08x\n",
2316 enable_irq(INT_DSP_MMU);
2320 #elif defined(CONFIG_ARCH_OMAP2)
2322 dsp_mmu_reg_t status;
2324 status = dsp_mmu_read_reg(DSP_MMU_IRQSTATUS);
2325 dsp_fault_adr = dsp_mmu_read_reg(DSP_MMU_FAULT_AD);
2327 #endif /* CONFIG_ARCH_OMAP2 */
2329 printk(KERN_INFO "DSP MMU interrupt!\n");
2331 #if defined(CONFIG_ARCH_OMAP1)
2333 printk(KERN_INFO "%s%s%s%s\n",
2334 (status & DSP_MMU_FAULT_ST_PREF)?
2335 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PREF)?
2339 (status & DSP_MMU_FAULT_ST_PERM)?
2340 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PERM)?
2341 " permission fault":
2342 " (permission fault)":
2344 (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2345 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TLB_MISS)?
2349 (status & DSP_MMU_FAULT_ST_TRANS)?
2350 (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TRANS)?
2351 " translation fault":
2352 " (translation fault)":
2355 #elif defined(CONFIG_ARCH_OMAP2)
2357 printk(KERN_INFO "%s%s%s%s%s\n",
2358 (status & DSP_MMU_IRQ_MULTIHITFAULT)?
2359 (MMU_IRQ_MASK & DSP_MMU_IRQ_MULTIHITFAULT)?
2363 (status & DSP_MMU_IRQ_TABLEWALKFAULT)?
2364 (MMU_IRQ_MASK & DSP_MMU_IRQ_TABLEWALKFAULT)?
2365 " table walk fault":
2366 " (table walk fault)":
2368 (status & DSP_MMU_IRQ_EMUMISS)?
2369 (MMU_IRQ_MASK & DSP_MMU_IRQ_EMUMISS)?
2373 (status & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2374 (MMU_IRQ_MASK & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2375 " translation fault":
2376 " (translation fault)":
2378 (status & DSP_MMU_IRQ_TLBMISS)?
2379 (MMU_IRQ_MASK & DSP_MMU_IRQ_TLBMISS)?
2384 #endif /* CONFIG_ARCH_OMAP2 */
2386 printk(KERN_INFO "fault address = %#08x\n", dsp_fault_adr);
2388 if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
2389 dsp_err_set(ERRCODE_MMU, (unsigned long)dsp_fault_adr);
2391 #ifdef CONFIG_ARCH_OMAP1
2394 printk(KERN_INFO "Resetting DSP...\n");
2395 dsp_cpustat_request(CPUSTAT_RESET);
2397 * if we enable followings, semaphore lock should be avoided.
2399 printk(KERN_INFO "Flushing DSP MMU...\n");
2405 #ifdef CONFIG_ARCH_OMAP2
2407 dsp_mmu_write_reg(status, DSP_MMU_IRQSTATUS);
2411 enable_irq(INT_DSP_MMU);
/* Work item: MMU fault handling is deferred from hard-IRQ context to
 * do_mmu_int() in process context. */
2414 static DECLARE_WORK(mmu_int_work, (void (*)(void *))do_mmu_int, NULL);
2417 * DSP MMU interrupt handler
/* DSP MMU hard-IRQ handler: mask the IRQ (re-enabled by do_mmu_int())
 * and defer all work to the workqueue. */
2420 static irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id,
2421 struct pt_regs *regs)
2423 disable_irq(INT_DSP_MMU);
2424 schedule_work(&mmu_int_work);
/* File operations for the /dev dsp_mem device node. */
2431 struct file_operations dsp_mem_fops = {
2432 .owner = THIS_MODULE,
2433 .llseek = dsp_mem_lseek,
2434 .read = dsp_mem_read,
2435 .write = dsp_mem_write,
2436 .ioctl = dsp_mem_ioctl,
2437 .mmap = dsp_mem_mmap,
2438 .open = dsp_mem_open,
/* Runtime start: on OMAP1, register the internal-memory enable/disable
 * callbacks with the DSP core. */
2441 void dsp_mem_start(void)
2443 #ifdef CONFIG_ARCH_OMAP1
2444 dsp_register_mem_cb(intmem_enable, intmem_disable);
/* Runtime stop: clear the memory-sync state and (OMAP1) unregister the
 * internal-memory callbacks. */
2448 void dsp_mem_stop(void)
2450 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
2451 #ifdef CONFIG_ARCH_OMAP1
2452 dsp_unregister_mem_cb();
/* Cookie whose address serves as dev_id for request_irq()/free_irq(). */
2456 static char devid_mmu;
/*
 * Module init: program the OMAP2 IPI entries (16-bit element size per
 * internal memory page) and I/O map, clear the exmap table, allocate
 * the DSP vector page, install the MMU interrupt handler (left disabled
 * until the DSP runs), and create the sysfs attributes.  The tail after
 * the elided error label undoes the allocation on failure.
 */
2458 int __init dsp_mem_init(void)
2462 #ifdef CONFIG_ARCH_OMAP2
2463 int dspmem_pg_count;
2465 dspmem_pg_count = dspmem_size >> 12;
2466 for (i = 0; i < dspmem_pg_count; i++) {
2467 dsp_ipi_write_reg(i, DSP_IPI_INDEX);
2468 dsp_ipi_write_reg(DSP_IPI_ENTRY_ELMSIZEVALUE_16, DSP_IPI_ENTRY);
2470 dsp_ipi_write_reg(1, DSP_IPI_ENABLE);
2472 dsp_ipi_write_reg(IOMAP_VAL, DSP_IPI_IOMAP);
2475 for (i = 0; i < DSP_MMU_TLB_LINES; i++)
2476 exmap_tbl[i].valid = 0;
2478 dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
2479 if (dspvect_page == NULL) {
2481 "omapdsp: failed to allocate memory "
2482 "for dsp vector table\n");
2486 #ifdef CONFIG_ARCH_OMAP1
2487 dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);
2491 * DSP MMU interrupt setup
2493 ret = request_irq(INT_DSP_MMU, dsp_mmu_interrupt, SA_INTERRUPT, "dsp",
2497 "failed to register DSP MMU interrupt: %d\n", ret);
2501 /* MMU interrupt is not enabled until DSP runs */
2502 disable_irq(INT_DSP_MMU);
2504 device_create_file(&dsp_device.dev, &dev_attr_mmu);
2505 device_create_file(&dsp_device.dev, &dev_attr_exmap);
2506 device_create_file(&dsp_device.dev, &dev_attr_mempool);
/* error unwind: undo idle-boot base and free the vector page */
2511 #ifdef CONFIG_ARCH_OMAP1
2512 dsp_reset_idle_boot_base();
2515 free_page((unsigned long)dspvect_page);
2516 dspvect_page = NULL;
/*
 * Module exit: free the MMU IRQ (re-enabling it first recovers the
 * disable_depth left by init/handler), undo the OMAP1 idle-boot base,
 * free the vector page, and remove the sysfs attributes.
 */
2520 void dsp_mem_exit(void)
2522 free_irq(INT_DSP_MMU, &devid_mmu);
2524 /* recover disable_depth */
2525 enable_irq(INT_DSP_MMU);
2527 #ifdef CONFIG_ARCH_OMAP1
2528 dsp_reset_idle_boot_base();
2533 if (dspvect_page != NULL) {
2534 free_page((unsigned long)dspvect_page);
2535 dspvect_page = NULL;
2538 device_remove_file(&dsp_device.dev, &dev_attr_mmu);
2539 device_remove_file(&dsp_device.dev, &dev_attr_exmap);
2540 device_remove_file(&dsp_device.dev, &dev_attr_mempool);