linux-2.6-omap-h63xx.git: arch/arm/plat-omap/dsp/dsp_mem.c
1 /*
2  * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
3  *
4  * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
5  *
6  * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
7  *
8  * Conversion to mempool API and ARM MMU section mapping
9  * by Paul Mundt <paul.mundt@nokia.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * version 2 as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23  * 02110-1301 USA
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/fs.h>
30 #include <linux/fb.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/mempool.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <asm/uaccess.h>
37 #include <asm/io.h>
38 #include <asm/irq.h>
39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h>
41 #include <asm/arch/tc.h>
42 #include <asm/arch/omapfb.h>
43 #include <asm/arch/mailbox.h>
44 #include <asm/arch/dsp_common.h>
45 #include "uaccess_dsp.h"
46 #include "dsp_mbcmd.h"
47 #include "dsp.h"
48 #include "ioctl.h"
49 #include "ipbuf.h"
50
51 #ifdef CONFIG_ARCH_OMAP2
52 #define IOMAP_VAL       0x3f
53 #endif
54
55 #define SZ_1KB  0x400
56 #define SZ_4KB  0x1000
57 #define SZ_64KB 0x10000
58 #define SZ_1MB  0x100000
59 #define SZ_16MB 0x1000000
60 #define is_aligned(adr,align)   (!((adr)&((align)-1)))
61 #define ORDER_4KB       (12 - PAGE_SHIFT)
62 #define ORDER_64KB      (16 - PAGE_SHIFT)
63 #define ORDER_1MB       (20 - PAGE_SHIFT)
64
65 /*
66  * absorb DSP MMU register size and location difference
67  */
68 #if defined(CONFIG_ARCH_OMAP1)
69 typedef u16 dsp_mmu_reg_t;
70 #define dsp_mmu_read_reg(a)     omap_readw(a)
71 #define dsp_mmu_write_reg(v,a)  omap_writew(v,a)
72 #elif defined(CONFIG_ARCH_OMAP2)
73 typedef u32 dsp_mmu_reg_t;
74 #define dsp_mmu_read_reg(a)     readl(a)
75 #define dsp_mmu_write_reg(v,a)  writel(v,a)
76 #define dsp_ipi_read_reg(a)     readl(a)
77 #define dsp_ipi_write_reg(v,a)  writel(v,a)
78 #endif
79
80 #if defined(CONFIG_ARCH_OMAP1)
81
82 #define dsp_mmu_enable() \
83         do { \
84                 dsp_mmu_write_reg(DSP_MMU_CNTL_MMU_EN | DSP_MMU_CNTL_RESET_SW, \
85                                   DSP_MMU_CNTL); \
86         } while(0)
87 #define dsp_mmu_disable() \
88         do { \
89                 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
90         } while(0)
91 #define __dsp_mmu_itack() \
92         do { \
93                 dsp_mmu_write_reg(DSP_MMU_IT_ACK_IT_ACK, DSP_MMU_IT_ACK); \
94         } while(0)
95
96 #elif defined(CONFIG_ARCH_OMAP2)
97
98 #define dsp_mmu_enable() \
99         do { \
100                 dsp_mmu_write_reg(DSP_MMU_CNTL_MMUENABLE, DSP_MMU_CNTL); \
101         } while(0)
102 #define dsp_mmu_disable() \
103         do { \
104                 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
105         } while(0)
106 #define dsp_mmu_reset() \
107         do { \
108                 dsp_mmu_write_reg(dsp_mmu_read_reg(DSP_MMU_SYSCONFIG) | \
109                                   DSP_MMU_SYSCONFIG_SOFTRESET, \
110                                   DSP_MMU_SYSCONFIG); \
111         } while(0)
112
113 #endif /* CONFIG_ARCH_OMAP2 */
114
115 #define dsp_mmu_flush() \
116         do { \
117                 dsp_mmu_write_reg(DSP_MMU_FLUSH_ENTRY_FLUSH_ENTRY, \
118                                   DSP_MMU_FLUSH_ENTRY); \
119         } while(0)
120 #define __dsp_mmu_gflush() \
121         do { \
122                 dsp_mmu_write_reg(DSP_MMU_GFLUSH_GFLUSH, DSP_MMU_GFLUSH); \
123         } while(0)
124
125 /*
126  * absorb register name difference
127  */
128 #ifdef CONFIG_ARCH_OMAP1
129 #define DSP_MMU_CAM_P                   DSP_MMU_CAM_L_P
130 #define DSP_MMU_CAM_V                   DSP_MMU_CAM_L_V
131 #define DSP_MMU_CAM_PAGESIZE_MASK       DSP_MMU_CAM_L_PAGESIZE_MASK
132 #define DSP_MMU_CAM_PAGESIZE_1MB        DSP_MMU_CAM_L_PAGESIZE_1MB
133 #define DSP_MMU_CAM_PAGESIZE_64KB       DSP_MMU_CAM_L_PAGESIZE_64KB
134 #define DSP_MMU_CAM_PAGESIZE_4KB        DSP_MMU_CAM_L_PAGESIZE_4KB
135 #define DSP_MMU_CAM_PAGESIZE_1KB        DSP_MMU_CAM_L_PAGESIZE_1KB
136 #endif /* CONFIG_ARCH_OMAP1 */
137
138 /*
139  * OMAP1 EMIFF access
140  */
141 #ifdef CONFIG_ARCH_OMAP1
142 #define EMIF_PRIO_LB_MASK       0x0000f000
143 #define EMIF_PRIO_LB_SHIFT      12
144 #define EMIF_PRIO_DMA_MASK      0x00000f00
145 #define EMIF_PRIO_DMA_SHIFT     8
146 #define EMIF_PRIO_DSP_MASK      0x00000070
147 #define EMIF_PRIO_DSP_SHIFT     4
148 #define EMIF_PRIO_MPU_MASK      0x00000007
149 #define EMIF_PRIO_MPU_SHIFT     0
150 #define set_emiff_dma_prio(prio) \
151         do { \
152                 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
153                              ~EMIF_PRIO_DMA_MASK) | \
154                             ((prio) << EMIF_PRIO_DMA_SHIFT), \
155                             OMAP_TC_OCPT1_PRIOR); \
156         } while(0)
157 #endif /* CONFIG_ARCH_OMAP1 */
158
159 enum exmap_type_e {
160         EXMAP_TYPE_MEM,
161         EXMAP_TYPE_FB
162 };
163
164 struct exmap_tbl_entry {
165         unsigned int valid:1;
166         unsigned int prsvd:1;   /* preserved */
167         int usecount;           /* reference count by mmap */
168         enum exmap_type_e type;
169         void *buf;              /* virtual address of the buffer,
170                                  * i.e. 0xc0000000 - */
171         void *vadr;             /* DSP shadow space,
172                                  * i.e. 0xe0000000 - 0xe0ffffff */
173         unsigned int order;
174         struct {
175                 int prev;
176                 int next;
177         } link;                 /* grouping */
178 };
179
180 #define INIT_EXMAP_TBL_ENTRY(ent,b,v,typ,od) \
181         do {\
182                 (ent)->buf       = (b); \
183                 (ent)->vadr      = (v); \
184                 (ent)->valid     = 1; \
185                 (ent)->prsvd     = 0; \
186                 (ent)->usecount  = 0; \
187                 (ent)->type      = (typ); \
188                 (ent)->order     = (od); \
189                 (ent)->link.next = -1; \
190                 (ent)->link.prev = -1; \
191         } while (0)
192
193 #define INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(ent,b,v) \
194         do {\
195                 (ent)->buf       = (b); \
196                 (ent)->vadr      = (v); \
197                 (ent)->valid     = 1; \
198                 (ent)->prsvd     = 1; \
199                 (ent)->usecount  = 0; \
200                 (ent)->type      = EXMAP_TYPE_MEM; \
201                 (ent)->order     = 0; \
202                 (ent)->link.next = -1; \
203                 (ent)->link.prev = -1; \
204         } while (0)
205
206 #define DSP_MMU_TLB_LINES       32
207 static struct exmap_tbl_entry exmap_tbl[DSP_MMU_TLB_LINES];
208 static int exmap_preserved_cnt;
209 static DECLARE_RWSEM(exmap_sem);
210
211 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
212 static struct omapfb_notifier_block *omapfb_nb;
213 static int omapfb_ready;
214 #endif
215
216 struct cam_ram_regset {
217 #if defined(CONFIG_ARCH_OMAP1)
218         dsp_mmu_reg_t cam_h;
219         dsp_mmu_reg_t cam_l;
220         dsp_mmu_reg_t ram_h;
221         dsp_mmu_reg_t ram_l;
222 #elif defined(CONFIG_ARCH_OMAP2)
223         dsp_mmu_reg_t cam;
224         dsp_mmu_reg_t ram;
225 #endif
226 };
227
228 struct tlb_entry {
229         dsp_long_t va;
230         unsigned long pa;
231         dsp_mmu_reg_t pgsz, prsvd, valid;
232 #if defined(CONFIG_ARCH_OMAP1)
233         dsp_mmu_reg_t ap;
234 #elif defined(CONFIG_ARCH_OMAP2)
235         dsp_mmu_reg_t endian, elsz, mixed;
236 #endif
237 };
238
239 #if defined(CONFIG_ARCH_OMAP1)
240 #define INIT_TLB_ENTRY(ent,v,p,ps) \
241         do { \
242                 (ent)->va = (v); \
243                 (ent)->pa = (p); \
244                 (ent)->pgsz = (ps); \
245                 (ent)->prsvd = 0; \
246                 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
247         } while (0)
248 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
249         do { \
250                 (ent)->va = (v); \
251                 (ent)->pa = (p); \
252                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
253                 (ent)->prsvd = DSP_MMU_CAM_P; \
254                 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
255         } while (0)
256 #elif defined(CONFIG_ARCH_OMAP2)
257 #define INIT_TLB_ENTRY(ent,v,p,ps) \
258         do { \
259                 (ent)->va = (v); \
260                 (ent)->pa = (p); \
261                 (ent)->pgsz = (ps); \
262                 (ent)->prsvd = 0; \
263                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
264                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
265                 (ent)->mixed = 0; \
266         } while (0)
267 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
268         do { \
269                 (ent)->va = (v); \
270                 (ent)->pa = (p); \
271                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
272                 (ent)->prsvd = DSP_MMU_CAM_P; \
273                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
274                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
275                 (ent)->mixed = 0; \
276         } while (0)
277 #define INIT_TLB_ENTRY_4KB_ES32_PRESERVED(ent,v,p) \
278         do { \
279                 (ent)->va = (v); \
280                 (ent)->pa = (p); \
281                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
282                 (ent)->prsvd = DSP_MMU_CAM_P; \
283                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
284                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_32; \
285                 (ent)->mixed = 0; \
286         } while (0)
287 #endif
288
289 #if defined(CONFIG_ARCH_OMAP1)
290 #define cam_ram_valid(cr)       ((cr).cam_l & DSP_MMU_CAM_V)
291 #elif defined(CONFIG_ARCH_OMAP2)
292 #define cam_ram_valid(cr)       ((cr).cam & DSP_MMU_CAM_V)
293 #endif
294
295 struct tlb_lock {
296         int base;
297         int victim;
298 };
299
300 static int dsp_exunmap(dsp_long_t dspadr);
301
302 static void *dspvect_page;
303 static u32 dsp_fault_adr;
304 static struct mem_sync_struct mem_sync;
305
306 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
307                         char *buf);
308 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
309                           char *buf);
310 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
311                             char *buf);
312
313 static struct device_attribute dev_attr_mmu =     __ATTR_RO(mmu);
314 static struct device_attribute dev_attr_exmap =   __ATTR_RO(exmap);
315 static struct device_attribute dev_attr_mempool = __ATTR_RO(mempool);
316
317 /*
318  * special mempool function:
319  * hope this goes to mm/mempool.c
320  */
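/*
 * Unlike plain mempool_alloc(), this takes from the preallocated
 * elements first and only falls back to mempool_alloc() (which may
 * sleep) once the reserve is empty.
 */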
321 static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
322 {
323         unsigned long flags;
324
325         spin_lock_irqsave(&pool->lock, flags);
326         if (likely(pool->curr_nr)) {
327                 void *element = pool->elements[--pool->curr_nr];
328                 spin_unlock_irqrestore(&pool->lock, flags);
329                 return element;
330         }
331         spin_unlock_irqrestore(&pool->lock, flags);
332
333         return mempool_alloc(pool, gfp_mask);
334 }
335
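/*
 * lineup_offset(): return the lowest address >= adr whose offset
 * within 'mask' equals that of 'ref'.  Used when lining up a DSP
 * address with a physical address so that large (64KB/1MB) MMU pages
 * can be used.
 */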
336 static __inline__ unsigned long lineup_offset(unsigned long adr,
337                                               unsigned long ref,
338                                               unsigned long mask)
339 {
340         unsigned long newadr;
341
342         newadr = (adr & ~mask) | (ref & mask);
343         if (newadr < adr)
344                 newadr += mask + 1;
345         return newadr;
346 }
347
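/*
 * dsp_mem_sync_inc(): increment the ARM-side counter (ad_arm) in each
 * registered sync sequence descriptor (DARAM/SARAM/SDRAM) while the
 * DSP memory interface is enabled.
 */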
348 int dsp_mem_sync_inc(void)
349 {
350         if (dsp_mem_enable((void *)dspmem_base) < 0)
351                 return -1;
352         if (mem_sync.DARAM)
353                 mem_sync.DARAM->ad_arm++;
354         if (mem_sync.SARAM)
355                 mem_sync.SARAM->ad_arm++;
356         if (mem_sync.SDRAM)
357                 mem_sync.SDRAM->ad_arm++;
358         dsp_mem_disable((void *)dspmem_base);
359         return 0;
360 }
361
362 /*
363  * dsp_mem_sync_config() is called from mbox1 workqueue
364  */
365 int dsp_mem_sync_config(struct mem_sync_struct *sync)
366 {
367         size_t sync_seq_sz = sizeof(struct sync_seq);
368
369 #ifdef OLD_BINARY_SUPPORT
370         if (sync == NULL) {
371                 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
372                 return 0;
373         }
374 #endif
375         if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
376             (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
377             (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
378                 printk(KERN_ERR
379                        "omapdsp: mem_sync address validation failure!\n"
380                        "  mem_sync.DARAM = 0x%p,\n"
381                        "  mem_sync.SARAM = 0x%p,\n"
382                        "  mem_sync.SDRAM = 0x%p,\n",
383                        sync->DARAM, sync->SARAM, sync->SDRAM);
384                 return -1;
385         }
386         memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
387         return 0;
388 }
389
390 static mempool_t *kmem_pool_1M;
391 static mempool_t *kmem_pool_64K;
392
393 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
394 {
395         return (void *)__get_dma_pages(gfp, (unsigned int)order);
396 }
397
398 static void dsp_pool_free(void *buf, void *order)
399 {
400         free_pages((unsigned long)buf, (unsigned int)order);
401 }
402
403 static void dsp_kmem_release(void)
404 {
405         if (kmem_pool_64K) {
406                 mempool_destroy(kmem_pool_64K);
407                 kmem_pool_64K = NULL;
408         }
409
410         if (kmem_pool_1M) {
411                 mempool_destroy(kmem_pool_1M);
412                 kmem_pool_1M = NULL;
413         }
414 }
415
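/*
 * dsp_kmem_reserve(): pre-allocate DMA-capable pages into the 1MB and
 * 64KB mempools so that later dsp_exmap() calls can obtain large,
 * physically contiguous buffers.  'size' must be a multiple of 64KB;
 * the return value is the number of bytes actually put into the pools.
 */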
416 static int dsp_kmem_reserve(unsigned long size)
417 {
418         unsigned long len = size;
419
420         /* alignment check */
421         if (!is_aligned(size, SZ_64KB)) {
422                 printk(KERN_ERR
423                        "omapdsp: size(0x%lx) is not a multiple of 64KB.\n", size);
424                 return -EINVAL;
425         }
426
427         if (size > DSPSPACE_SIZE) {
428                 printk(KERN_ERR
429                        "omapdsp: size(0x%lx) is larger than DSP memory space "
430                        "size (0x%x).\n", size, DSPSPACE_SIZE);
431                 return -EINVAL;
432         }
433
434         if (size >= SZ_1MB) {
435                 int nr = size >> 20;
436
437                 if (likely(!kmem_pool_1M))
438                         kmem_pool_1M = mempool_create(nr,
439                                                       dsp_pool_alloc,
440                                                       dsp_pool_free,
441                                                       (void *)ORDER_1MB);
442                 else
443                         mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
444                                        GFP_KERNEL);
445
446                 size &= ~(0xf << 20);
447         }
448
449         if (size >= SZ_64KB) {
450                 int nr = size >> 16;
451
452                 if (likely(!kmem_pool_64K))
453                         kmem_pool_64K = mempool_create(nr,
454                                                        dsp_pool_alloc,
455                                                        dsp_pool_free,
456                                                        (void *)ORDER_64KB);
457                 else
458                         mempool_resize(kmem_pool_64K,
459                                        kmem_pool_64K->min_nr + nr, GFP_KERNEL);
460
461                 size &= ~(0xf << 16);
462         }
463
464         if (size)
465                 len -= size;
466
467         return len;
468 }
469
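/*
 * dsp_mem_free_pages(): undo the SetPageReserved() done at exmap time
 * and return the buffer to the matching mempool, or to the page
 * allocator when no pool of that order exists.
 */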
470 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
471 {
472         struct page *page, *ps, *pe;
473
474         ps = virt_to_page(buf);
475         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
476
477         for (page = ps; page < pe; page++)
478                 ClearPageReserved(page);
479
480         if ((order == ORDER_64KB) && likely(kmem_pool_64K))
481                 mempool_free((void *)buf, kmem_pool_64K);
482         else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
483                 mempool_free((void *)buf, kmem_pool_1M);
484         else
485                 free_pages(buf, order);
486 }
487
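/*
 * exmap_alloc_pte(): install a single kernel PTE mapping virt -> phys
 * in init_mm, allocating a new PTE table if the covering PMD is still
 * empty.
 */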
488 static inline void
489 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
490 {
491         pgd_t *pgd;
492         pud_t *pud;
493         pmd_t *pmd;
494         pte_t *pte;
495
496         pgd = pgd_offset_k(virt);
497         pud = pud_offset(pgd, virt);
498         pmd = pmd_offset(pud, virt);
499
500         if (pmd_none(*pmd)) {
501                 pte = pte_alloc_one_kernel(&init_mm, 0);
502                 if (!pte)
503                         return;
504
505                 /* note: two PMDs will be set  */
506                 pmd_populate_kernel(&init_mm, pmd, pte);
507         }
508
509         pte = pte_offset_kernel(pmd, virt);
510         set_pte_ext(pte, pfn_pte(phys >> PAGE_SHIFT, prot), 0);
511 }
512
513 #if 0
514 static inline int
515 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
516 {
517         pgd_t *pgd;
518         pud_t *pud;
519         pmd_t *pmd;
520
521         pgd = pgd_offset_k(virt);
522         pud = pud_alloc(&init_mm, pgd, virt);
523         pmd = pmd_alloc(&init_mm, pud, virt);
524
525         if (virt & (1 << 20))
526                 pmd++;
527
528         if (!pmd_none(*pmd))
529                 /* No good, fall back on smaller mappings. */
530                 return -EINVAL;
531
532         *pmd = __pmd(phys | prot);
533         flush_pmd_entry(pmd);
534
535         return 0;
536 }
537 #endif
538
539 /*
540  * ARM MMU operations
541  */
542 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
543                             unsigned long size)
544 {
545         long off;
546         pgprot_t prot_pte;
547         int prot_sect;
548
549         printk(KERN_DEBUG
550                "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
551                virt, phys, size);
552
553         prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
554                             L_PTE_DIRTY | L_PTE_WRITE);
555
556         prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
557                     PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
558
559         if (cpu_architecture() <= CPU_ARCH_ARMv5)
560                 prot_sect |= PMD_BIT4;
561
562         off = phys - virt;
563
564         while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
565                 exmap_alloc_pte(virt, virt + off, prot_pte);
566
567                 virt += PAGE_SIZE;
568                 size -= PAGE_SIZE;
569         }
570
571         /* XXX: Not yet.. confuses dspfb -- PFM. */
572 #if 0
573         while (size >= (PGDIR_SIZE / 2)) {
574                 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
575                         break;
576
577                 virt += (PGDIR_SIZE / 2);
578                 size -= (PGDIR_SIZE / 2);
579         }
580 #endif
581
582         while (size >= PAGE_SIZE) {
583                 exmap_alloc_pte(virt, virt + off, prot_pte);
584
585                 virt += PAGE_SIZE;
586                 size -= PAGE_SIZE;
587         }
588
589         BUG_ON(size);
590
591         return 0;
592 }
593
594         /* XXX: T.Kobayashi
595          * A process can have old mappings. If we want to clear a pmd,
596          * we need to do it for all processes that use the old mapping.
597          */
598 #if 0
599 static inline void
600 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
601 {
602         pte_t *pte;
603
604         pte = pte_offset_map(pmd, addr);
605         do {
606                 if (pte_none(*pte))
607                         continue;
608
609                 pte_clear(&init_mm, addr, pte);
610         } while (pte++, addr += PAGE_SIZE, addr != end);
611
612         pte_unmap(pte - 1);
613 }
614
615 static inline void
616 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
617 {
618         pmd_t *pmd;
619         unsigned long next;
620
621         pmd = pmd_offset(pud, addr);
622         do {
623                 next = pmd_addr_end(addr, end);
624
625                 if (addr & (1 << 20))
626                         pmd++;
627
628                 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
629                         *pmd = __pmd(0);
630                         clean_pmd_entry(pmd);
631                         continue;
632                 }
633
634                 if (pmd_none_or_clear_bad(pmd))
635                         continue;
636
637                 exmap_clear_pte_range(pmd, addr, next);
638         } while (pmd++, addr = next, addr != end);
639 }
640
641 static inline void
642 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
643 {
644         pud_t *pud;
645         unsigned long next;
646
647         pud = pud_offset(pgd, addr);
648         do {
649                 next = pud_addr_end(addr, end);
650                 if (pud_none_or_clear_bad(pud))
651                         continue;
652
653                 exmap_clear_pmd_range(pud, addr, next);
654         } while (pud++, addr = next, addr != end);
655 }
656 #endif
657
658 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
659 {
660 #if 0
661         unsigned long next, end;
662         pgd_t *pgd;
663
664         printk(KERN_DEBUG
665                "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
666                virt, size);
667
668         pgd = pgd_offset_k(virt);
669         end = virt + size;
670         do {
671                 next = pgd_addr_end(virt, end);
672                 if (pgd_none_or_clear_bad(pgd))
673                         continue;
674
675                 exmap_clear_pud_range(pgd, virt, next);
676         } while (pgd++, virt = next, virt != end);
677 #else
678         pgd_t *pgd;
679         pud_t *pud;
680         pmd_t *pmd;
681         pte_t *pte;
682
683         printk(KERN_DEBUG
684                "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
685                virt, size);
686
687         while (size >= PAGE_SIZE) {
688                 pgd = pgd_offset_k(virt);
689                 pud = pud_offset(pgd, virt);
690                 pmd = pmd_offset(pud, virt);
691                 pte = pte_offset_kernel(pmd, virt);
692
693                 pte_clear(&init_mm, virt, pte);
694                 size -= PAGE_SIZE;
695                 virt += PAGE_SIZE;
696         }
697
698         BUG_ON(size);
699 #endif
700 }
701
702 static int exmap_valid(void *vadr, size_t len)
703 {
704         /* exmap_sem should be held before calling this function */
705         int i;
706
707 start:
708         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
709                 void *mapadr;
710                 unsigned long mapsize;
711                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
712
713                 if (!ent->valid)
714                         continue;
715                 mapadr = (void *)ent->vadr;
716                 mapsize = 1 << (ent->order + PAGE_SHIFT);
717                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
718                         if (vadr + len <= mapadr + mapsize) {
719                                 /* this map covers the whole range. */
720                                 return 1;
721                         } else {
722                                 /*
723                                  * this map covers the range partially;
724                                  * check the remaining portion.
725                                  */
726                                 len -= mapadr + mapsize - vadr;
727                                 vadr = mapadr + mapsize;
728                                 goto start;
729                         }
730                 }
731         }
732
733         return 0;
734 }
735
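/*
 * dsp_mem_type(): classify a DSP-side buffer as DARAM, SARAM, external
 * (exmap'ed) memory, crossing a region boundary, or unmapped.
 */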
736 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
737 {
738         void *ds = (void *)daram_base;
739         void *de = (void *)daram_base + daram_size;
740         void *ss = (void *)saram_base;
741         void *se = (void *)saram_base + saram_size;
742         int ret;
743
744         if ((vadr >= ds) && (vadr < de)) {
745                 if (vadr + len > de)
746                         return MEM_TYPE_CROSSING;
747                 else
748                         return MEM_TYPE_DARAM;
749         } else if ((vadr >= ss) && (vadr < se)) {
750                 if (vadr + len > se)
751                         return MEM_TYPE_CROSSING;
752                 else
753                         return MEM_TYPE_SARAM;
754         } else {
755                 down_read(&exmap_sem);
756                 if (exmap_valid(vadr, len))
757                         ret = MEM_TYPE_EXTERN;
758                 else
759                         ret = MEM_TYPE_NONE;
760                 up_read(&exmap_sem);
761                 return ret;
762         }
763 }
764
765 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
766 {
767         if (dsp_mem_type(p, len) <= 0) {
768                 if (fmt != NULL) {
769                         char s[64];
770                         va_list args;
771
772                         va_start(args, fmt);
773                         vsnprintf(s, sizeof(s), fmt, args);
774                         va_end(args);
775                         printk(KERN_ERR
776                                "omapdsp: %s address(0x%p) and size(0x%x) are "
777                                "not valid!\n"
778                                "         (crossing different types of memories, or\n"
779                                "          external memory space where no "
780                                "actual memory is mapped)\n",
781                                s, p, len);
782                 }
783                 return -1;
784         }
785
786         return 0;
787 }
788
789 /*
790  * exmap_use(), unuse():
791  * when the mapped area is exported to user space with mmap,
792  * the usecount is incremented.
793  * while the usecount > 0, that area can't be released.
794  */
795 void exmap_use(void *vadr, size_t len)
796 {
797         int i;
798
799         down_write(&exmap_sem);
800         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
801                 void *mapadr;
802                 unsigned long mapsize;
803                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
804
805                 if (!ent->valid)
806                         continue;
807                 mapadr = (void *)ent->vadr;
808                 mapsize = 1 << (ent->order + PAGE_SHIFT);
809                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
810                         ent->usecount++;
811         }
812         up_write(&exmap_sem);
813 }
814
815 void exmap_unuse(void *vadr, size_t len)
816 {
817         int i;
818
819         down_write(&exmap_sem);
820         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
821                 void *mapadr;
822                 unsigned long mapsize;
823                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
824
825                 if (!ent->valid)
826                         continue;
827                 mapadr = (void *)ent->vadr;
828                 mapsize = 1 << (ent->order + PAGE_SHIFT);
829                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
830                         ent->usecount--;
831         }
832         up_write(&exmap_sem);
833 }
834
835 /*
836  * dsp_virt_to_phys()
837  * returns physical address, and sets len to valid length
838  */
839 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
840 {
841         int i;
842
843         if (is_dsp_internal_mem(vadr)) {
844                 /* DARAM or SARAM */
845                 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
846                 return (unsigned long)vadr;
847         }
848
849         /* EXRAM */
850         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
851                 void *mapadr;
852                 unsigned long mapsize;
853                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
854
855                 if (!ent->valid)
856                         continue;
857                 mapadr = (void *)ent->vadr;
858                 mapsize = 1 << (ent->order + PAGE_SHIFT);
859                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
860                         *len = mapadr + mapsize - vadr;
861                         return __pa(ent->buf) + vadr - mapadr;
862                 }
863         }
864
865         /* valid mapping not found */
866         return 0;
867 }
868
869 /*
870  * DSP MMU operations
871  */
872 #ifdef CONFIG_ARCH_OMAP1
873 static dsp_mmu_reg_t get_cam_l_va_mask(dsp_mmu_reg_t pgsz)
874 {
875         switch (pgsz) {
876         case DSP_MMU_CAM_PAGESIZE_1MB:
877                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
878                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
879         case DSP_MMU_CAM_PAGESIZE_64KB:
880                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
881                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
882         case DSP_MMU_CAM_PAGESIZE_4KB:
883                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
884                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
885         case DSP_MMU_CAM_PAGESIZE_1KB:
886                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
887                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
888         }
889         return 0;
890 }
891 #endif /* CONFIG_ARCH_OMAP1 */
892
893 #if defined(CONFIG_ARCH_OMAP1)
894 #define get_cam_va_mask(pgsz) \
895         ((u32)DSP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
896          (u32)get_cam_l_va_mask(pgsz) << 6)
897 #elif defined(CONFIG_ARCH_OMAP2)
898 #define get_cam_va_mask(pgsz) \
899         ((pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
900          (pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
901          (pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
902          (pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)
903 #endif /* CONFIG_ARCH_OMAP2 */
904
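/*
 * get_tlb_lock() / set_tlb_lock(): read and write the DSP MMU LOCK
 * register, which holds the base of the locked TLB region and the
 * current victim pointer.
 */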
905 static void get_tlb_lock(struct tlb_lock *tlb_lock)
906 {
907         dsp_mmu_reg_t lock = dsp_mmu_read_reg(DSP_MMU_LOCK);
908
909         tlb_lock->base = (lock & DSP_MMU_LOCK_BASE_MASK) >>
910                          DSP_MMU_LOCK_BASE_SHIFT;
911         tlb_lock->victim = (lock & DSP_MMU_LOCK_VICTIM_MASK) >>
912                            DSP_MMU_LOCK_VICTIM_SHIFT;
913 }
914
915 static void set_tlb_lock(struct tlb_lock *tlb_lock)
916 {
917         dsp_mmu_write_reg((tlb_lock->base   << DSP_MMU_LOCK_BASE_SHIFT) |
918                           (tlb_lock->victim << DSP_MMU_LOCK_VICTIM_SHIFT),
919                           DSP_MMU_LOCK);
920 }
921
922 static void __read_tlb(struct tlb_lock *tlb_lock, struct cam_ram_regset *cr)
923 {
924         /* set victim */
925         set_tlb_lock(tlb_lock);
926
927 #if defined(CONFIG_ARCH_OMAP1)
928         /* read a TLB entry */
929         dsp_mmu_write_reg(DSP_MMU_LD_TLB_RD, DSP_MMU_LD_TLB);
930
931         cr->cam_h = dsp_mmu_read_reg(DSP_MMU_READ_CAM_H);
932         cr->cam_l = dsp_mmu_read_reg(DSP_MMU_READ_CAM_L);
933         cr->ram_h = dsp_mmu_read_reg(DSP_MMU_READ_RAM_H);
934         cr->ram_l = dsp_mmu_read_reg(DSP_MMU_READ_RAM_L);
935 #elif defined(CONFIG_ARCH_OMAP2)
936         cr->cam = dsp_mmu_read_reg(DSP_MMU_READ_CAM);
937         cr->ram = dsp_mmu_read_reg(DSP_MMU_READ_RAM);
938 #endif
939 }
940
941 static void __load_tlb(struct cam_ram_regset *cr)
942 {
943 #if defined(CONFIG_ARCH_OMAP1)
944         dsp_mmu_write_reg(cr->cam_h, DSP_MMU_CAM_H);
945         dsp_mmu_write_reg(cr->cam_l, DSP_MMU_CAM_L);
946         dsp_mmu_write_reg(cr->ram_h, DSP_MMU_RAM_H);
947         dsp_mmu_write_reg(cr->ram_l, DSP_MMU_RAM_L);
948 #elif defined(CONFIG_ARCH_OMAP2)
949         dsp_mmu_write_reg(cr->cam | DSP_MMU_CAM_V, DSP_MMU_CAM);
950         dsp_mmu_write_reg(cr->ram, DSP_MMU_RAM);
951 #endif
952
953         /* flush the entry */
954         dsp_mmu_flush();
955
956         /* load a TLB entry */
957         dsp_mmu_write_reg(DSP_MMU_LD_TLB_LD, DSP_MMU_LD_TLB);
958 }
959
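/*
 * dsp_mmu_load_tlb(): load one entry into the locked region of the DSP
 * TLB.  An invalid slot below the current lock base is reused if one
 * exists; otherwise the entry is appended at the lock base and the
 * base is advanced.  Fails with -EBUSY when the last TLB line would
 * have to be locked.
 */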
960 static int dsp_mmu_load_tlb(struct tlb_entry *tlb_ent)
961 {
962         struct tlb_lock tlb_lock;
963         struct cam_ram_regset cr;
964
965 #ifdef CONFIG_ARCH_OMAP1
966         clk_enable(dsp_ck_handle);
967         omap_dsp_request_mem();
968 #endif
969
970         get_tlb_lock(&tlb_lock);
971         for (tlb_lock.victim = 0;
972              tlb_lock.victim < tlb_lock.base;
973              tlb_lock.victim++) {
974                 struct cam_ram_regset tmp_cr;
975
976                 /* read a TLB entry */
977                 __read_tlb(&tlb_lock, &tmp_cr);
978                 if (!cam_ram_valid(tmp_cr))
979                         goto found_victim;
980         }
981         set_tlb_lock(&tlb_lock);
982
983 found_victim:
984         /* The last (31st) entry cannot be locked? */
985         if (tlb_lock.victim == 31) {
986                 printk(KERN_ERR "omapdsp: TLB is full.\n");
987                 return -EBUSY;
988         }
989
990         if (tlb_ent->va & ~get_cam_va_mask(tlb_ent->pgsz)) {
991                 printk(KERN_ERR
992                        "omapdsp: mapping vadr (0x%06x) is not "
993                        "aligned on a page boundary\n", tlb_ent->va);
994                 return -EINVAL;
995         }
996
997 #if defined(CONFIG_ARCH_OMAP1)
998         cr.cam_h = tlb_ent->va >> 22;
999         cr.cam_l = (tlb_ent->va >> 6 & get_cam_l_va_mask(tlb_ent->pgsz)) |
1000                    tlb_ent->prsvd | tlb_ent->pgsz;
1001         cr.ram_h = tlb_ent->pa >> 16;
1002         cr.ram_l = (tlb_ent->pa & DSP_MMU_RAM_L_RAM_LSB_MASK) | tlb_ent->ap;
1003 #elif defined(CONFIG_ARCH_OMAP2)
1004         cr.cam = (tlb_ent->va & DSP_MMU_CAM_VATAG_MASK) |
1005                  tlb_ent->prsvd | tlb_ent->pgsz;
1006         cr.ram = tlb_ent->pa | tlb_ent->endian | tlb_ent->elsz;
1007 #endif
1008         __load_tlb(&cr);
1009
1010         /* update lock base */
1011         if (tlb_lock.victim == tlb_lock.base)
1012                 tlb_lock.base++;
1013         tlb_lock.victim = tlb_lock.base;
1014         set_tlb_lock(&tlb_lock);
1015
1016 #ifdef CONFIG_ARCH_OMAP1
1017         omap_dsp_release_mem();
1018         clk_disable(dsp_ck_handle);
1019 #endif
1020         return 0;
1021 }
1022
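/*
 * dsp_mmu_clear_tlb(): invalidate the locked TLB entry whose CAM
 * virtual address matches 'vadr', then shrink the lock base to just
 * above the highest entry that is still valid.
 */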
1023 static int dsp_mmu_clear_tlb(dsp_long_t vadr)
1024 {
1025         struct tlb_lock tlb_lock;
1026         int i;
1027         int max_valid = 0;
1028
1029 #ifdef CONFIG_ARCH_OMAP1
1030         clk_enable(dsp_ck_handle);
1031         omap_dsp_request_mem();
1032 #endif
1033
1034         get_tlb_lock(&tlb_lock);
1035         for (i = 0; i < tlb_lock.base; i++) {
1036                 struct cam_ram_regset cr;
1037                 dsp_long_t cam_va;
1038                 dsp_mmu_reg_t pgsz;
1039
1040                 /* read a TLB entry */
1041                 tlb_lock.victim = i;
1042                 __read_tlb(&tlb_lock, &cr);
1043                 if (!cam_ram_valid(cr))
1044                         continue;
1045
1046 #if defined(CONFIG_ARCH_OMAP1)
1047                 pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
1048                 cam_va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
1049                          (u32)(cr.cam_l & get_cam_l_va_mask(pgsz)) << 6;
1050 #elif defined(CONFIG_ARCH_OMAP2)
1051                 pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
1052                 cam_va = cr.cam & get_cam_va_mask(pgsz);
1053 #endif
1054
1055                 if (cam_va == vadr)
1056                         /* flush the entry */
1057                         dsp_mmu_flush();
1058                 else
1059                         max_valid = i;
1060         }
1061
1062         /* set new lock base */
1063         tlb_lock.base   = max_valid + 1;
1064         tlb_lock.victim = max_valid + 1;
1065         set_tlb_lock(&tlb_lock);
1066
1067 #ifdef CONFIG_ARCH_OMAP1
1068         omap_dsp_release_mem();
1069         clk_disable(dsp_ck_handle);
1070 #endif
1071         return 0;
1072 }
1073
1074 static void dsp_mmu_gflush(void)
1075 {
1076         struct tlb_lock tlb_lock;
1077
1078 #ifdef CONFIG_ARCH_OMAP1
1079         clk_enable(dsp_ck_handle);
1080         omap_dsp_request_mem();
1081 #endif
1082
1083         __dsp_mmu_gflush();
1084         tlb_lock.base   = exmap_preserved_cnt;
1085         tlb_lock.victim = exmap_preserved_cnt;
1086         set_tlb_lock(&tlb_lock);
1087
1088 #ifdef CONFIG_ARCH_OMAP1
1089         omap_dsp_release_mem();
1090         clk_disable(dsp_ck_handle);
1091 #endif
1092 }
1093
1094 /*
1095  * dsp_exmap()
1096  *
1097  * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
1098  * In this case, the buffer for DSP is allocated in this routine,
1099  * then it is mapped.
1100  * On the other hand, callers such as the frame buffer export code
1101  * call this function with padr set. It means some known address space
1102  * pointed to by padr is going to be shared with the DSP.
1103  */
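/*
 * Rough usage sketch (illustrative only; the DSP address below is an
 * assumption and must lie in the external memory area):
 *
 *     dsp_exmap(0x200000, 0, SZ_1MB, EXMAP_TYPE_MEM);
 *
 * allocates and maps 1MB of kernel memory at DSP byte address
 * 0x200000, whereas sharing a known physical region, as dsp_fbexport()
 * does for the frame buffer, passes its physical address instead of 0.
 */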
1104 static int dsp_exmap(dsp_long_t dspadr, unsigned long padr, unsigned long size,
1105                      enum exmap_type_e type)
1106 {
1107         dsp_mmu_reg_t pgsz;
1108         void *buf;
1109         unsigned int order = 0;
1110         unsigned long unit;
1111         int prev = -1;
1112         dsp_long_t _dspadr = dspadr;
1113         unsigned long _padr = padr;
1114         void *_vadr = dspbyte_to_virt(dspadr);
1115         unsigned long _size = size;
1116         struct tlb_entry tlb_ent;
1117         struct exmap_tbl_entry *exmap_ent;
1118         int status;
1119         int idx;
1120         int i;
1121
1122 #define MINIMUM_PAGESZ  SZ_4KB
1123         /*
1124          * alignment check
1125          */
1126         if (!is_aligned(size, MINIMUM_PAGESZ)) {
1127                 printk(KERN_ERR
1128                        "omapdsp: size(0x%lx) is not a multiple of 4KB.\n", size);
1129                 return -EINVAL;
1130         }
1131         if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
1132                 printk(KERN_ERR
1133                        "omapdsp: DSP address(0x%x) is not aligned.\n", dspadr);
1134                 return -EINVAL;
1135         }
1136         if (!is_aligned(padr, MINIMUM_PAGESZ)) {
1137                 printk(KERN_ERR
1138                        "omapdsp: physical address(0x%lx) is not aligned.\n",
1139                        padr);
1140                 return -EINVAL;
1141         }
1142
1143         /* address validity check */
1144         if ((dspadr < dspmem_size) ||
1145             (dspadr >= DSPSPACE_SIZE) ||
1146             ((dspadr + size > DSP_INIT_PAGE) &&
1147              (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
1148                 printk(KERN_ERR
1149                        "omapdsp: illegal address/size for dsp_exmap().\n");
1150                 return -EINVAL;
1151         }
1152
1153         down_write(&exmap_sem);
1154
1155         /* overlap check */
1156         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1157                 unsigned long mapsize;
1158                 struct exmap_tbl_entry *tmp_ent = &exmap_tbl[i];
1159
1160                 if (!tmp_ent->valid)
1161                         continue;
1162                 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
1163                 if ((_vadr + size > tmp_ent->vadr) &&
1164                     (_vadr < tmp_ent->vadr + mapsize)) {
1165                         printk(KERN_ERR "omapdsp: exmap page overlap!\n");
1166                         up_write(&exmap_sem);
1167                         return -EINVAL;
1168                 }
1169         }
1170
1171 start:
1172         buf = NULL;
1173         /* Are there any free TLB lines?  */
1174         for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1175                 if (!exmap_tbl[idx].valid)
1176                         goto found_free;
1177         }
1178         printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
1179         status = -EBUSY;
1180         goto fail;
1181
1182 found_free:
1183         exmap_ent = &exmap_tbl[idx];
1184
1185         /*
1186          * we don't use
1187          * 1KB mapping in OMAP1,
1188          * 16MB mapping in OMAP2.
1189          */
1190         if ((_size >= SZ_1MB) &&
1191             (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
1192             is_aligned(_dspadr, SZ_1MB)) {
1193                 unit = SZ_1MB;
1194                 pgsz = DSP_MMU_CAM_PAGESIZE_1MB;
1195         } else if ((_size >= SZ_64KB) &&
1196                    (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
1197                    is_aligned(_dspadr, SZ_64KB)) {
1198                 unit = SZ_64KB;
1199                 pgsz = DSP_MMU_CAM_PAGESIZE_64KB;
1200         } else {
1201                 unit = SZ_4KB;
1202                 pgsz = DSP_MMU_CAM_PAGESIZE_4KB;
1203         }
1204
1205         order = get_order(unit);
1206
1207         /* buffer allocation */
1208         if (type == EXMAP_TYPE_MEM) {
1209                 struct page *page, *ps, *pe;
1210
1211                 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
1212                         buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
1213                 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
1214                         buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
1215                 else {
1216                         buf = (void *)__get_dma_pages(GFP_KERNEL, order);
1217                         if (buf == NULL) {
1218                                 status = -ENOMEM;
1219                                 goto fail;
1220                         }
1221                 }
1222
1223                 /* mark the pages as reserved; this is needed for mmap */
1224                 ps = virt_to_page(buf);
1225                 pe = virt_to_page(buf + unit);
1226
1227                 for (page = ps; page < pe; page++)
1228                         SetPageReserved(page);
1229
1230                 _padr = __pa(buf);
1231         }
1232
1233         /*
1234          * mapping for ARM MMU:
1235          * we should not access the allocated memory through 'buf',
1236          * since this area should not be cached.
1237          */
1238         status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
1239         if (status < 0)
1240                 goto fail;
1241
1242         /* loading DSP TLB entry */
1243         INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
1244         status = dsp_mmu_load_tlb(&tlb_ent);
1245         if (status < 0) {
1246                 exmap_clear_armmmu((unsigned long)_vadr, unit);
1247                 goto fail;
1248         }
1249
1250         INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
1251         exmap_ent->link.prev = prev;
1252         if (prev >= 0)
1253                 exmap_tbl[prev].link.next = idx;
1254
1255         if ((_size -= unit) == 0) {     /* normal completion */
1256                 up_write(&exmap_sem);
1257                 return size;
1258         }
1259
1260         _dspadr += unit;
1261         _vadr   += unit;
1262         _padr = padr ? _padr + unit : 0;
1263         prev = idx;
1264         goto start;
1265
1266 fail:
1267         up_write(&exmap_sem);
1268         if (buf)
1269                 dsp_mem_free_pages((unsigned long)buf, order);
1270         dsp_exunmap(dspadr);
1271         return status;
1272 }
1273
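/*
 * unmap_free_arm(): tear down the ARM-side mapping of one exmap entry
 * and release its backing store: free the pages for a memory entry, or
 * unregister the omapfb client for a frame buffer entry.  Returns the
 * size of the unmapped area.
 */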
1274 static unsigned long unmap_free_arm(struct exmap_tbl_entry *ent)
1275 {
1276         unsigned long size;
1277
1278         /* clearing ARM MMU */
1279         size = 1 << (ent->order + PAGE_SHIFT);
1280         exmap_clear_armmmu((unsigned long)ent->vadr, size);
1281
1282         /* freeing allocated memory */
1283         if (ent->type == EXMAP_TYPE_MEM) {
1284                 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1285                 printk(KERN_DEBUG
1286                        "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1287                        size, ent->buf);
1288         }
1289 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1290         else if (ent->type == EXMAP_TYPE_FB) {
1291                 int status;
1292                 if (omapfb_nb) {
1293                         status = omapfb_unregister_client(omapfb_nb);
1294                         if (!status)
1295                                 printk("omapfb_unregister_client(): "
1296                                        "success\n");
1297                         else
1298                                 printk("omapfb_unregister_client(): "
1299                                        "failure(%d)\n", status);
1300                         kfree(omapfb_nb);
1301                         omapfb_nb = NULL;
1302                         omapfb_ready = 0;
1303                 }
1304         }
1305 #endif
1306
1307         return size;
1308 }
1309
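/*
 * dsp_exunmap(): unmap the exmap group starting at 'dspadr'.  Each
 * linked entry has its DSP TLB entry cleared and its ARM mapping and
 * buffer released; the total number of bytes unmapped is returned.
 * Fails if any entry is still referenced by an mmap (usecount > 0).
 */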
1310 static int dsp_exunmap(dsp_long_t dspadr)
1311 {
1312         void *vadr;
1313         unsigned long size;
1314         int total = 0;
1315         struct exmap_tbl_entry *ent;
1316         int idx;
1317
1318         vadr = dspbyte_to_virt(dspadr);
1319         down_write(&exmap_sem);
1320         for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1321                 ent = &exmap_tbl[idx];
1322                 if ((!ent->valid) || ent->prsvd)
1323                         continue;
1324                 if (ent->vadr == vadr)
1325                         goto found_map;
1326         }
1327         up_write(&exmap_sem);
1328         printk(KERN_WARNING
1329                "omapdsp: address %06x not found in exmap_tbl.\n", dspadr);
1330         return -EINVAL;
1331
1332 found_map:
1333         if (ent->usecount > 0) {
1334                 printk(KERN_ERR
1335                        "omapdsp: exmap reference count is not 0.\n"
1336                        "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
1337                        idx, ent->vadr, ent->order, ent->usecount);
1338                 up_write(&exmap_sem);
1339                 return -EINVAL;
1340         }
1341         /* clearing DSP TLB entry */
1342         dsp_mmu_clear_tlb(dspadr);
1343
1344         /* clear ARM MMU and free buffer */
1345         size = unmap_free_arm(ent);
1346         ent->valid = 0;
1347         total += size;
1348
1349         /* we don't free PTEs */
1350
1351         /* flush TLB */
1352         flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
1353
1354         if ((idx = ent->link.next) < 0)
1355                 goto up_out;    /* normal completion */
1356         ent = &exmap_tbl[idx];
1357         dspadr += size;
1358         vadr   += size;
1359         if (ent->vadr == vadr)
1360                 goto found_map; /* continue */
1361
1362         printk(KERN_ERR
1363                "omapdsp: illegal exmap_tbl grouping!\n"
1364                "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1365                vadr, idx, ent->vadr);
1366         up_write(&exmap_sem);
1367         return -EINVAL;
1368
1369 up_out:
1370         up_write(&exmap_sem);
1371         return total;
1372 }
1373
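/*
 * exmap_flush(): drop every non-preserved exmap entry: globally flush
 * the DSP TLB, then unmap and free each remaining valid entry.
 */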
1374 static void exmap_flush(void)
1375 {
1376         struct exmap_tbl_entry *ent;
1377         int i;
1378
1379         down_write(&exmap_sem);
1380
1381         /* clearing DSP TLB entry */
1382         dsp_mmu_gflush();
1383
1384         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1385                 ent = &exmap_tbl[i];
1386                 if (ent->valid && (!ent->prsvd)) {
1387                         unmap_free_arm(ent);
1388                         ent->valid = 0;
1389                 }
1390         }
1391
1392         /* flush TLB */
1393         flush_tlb_kernel_range(dspmem_base + dspmem_size,
1394                                dspmem_base + DSPSPACE_SIZE);
1395         up_write(&exmap_sem);
1396 }
1397
1398 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1399 #ifndef CONFIG_FB
1400 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1401 #endif /* CONFIG_FB */
1402
1403 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1404 static int omapfb_notifier_cb(struct notifier_block *omapfb_nb,
1405                               unsigned long event, void *fbi)
1406 {
1407         /* XXX */
1408         printk("omapfb_notifier_cb(): event = %s\n",
1409                (event == OMAPFB_EVENT_READY)    ? "READY" :
1410                (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1411         if (event == OMAPFB_EVENT_READY)
1412                 omapfb_ready = 1;
1413         else if (event == OMAPFB_EVENT_DISABLED)
1414                 omapfb_ready = 0;
1415         return 0;
1416 }
1417 #endif
1418
1419 static int dsp_fbexport(dsp_long_t *dspadr)
1420 {
1421         dsp_long_t dspadr_actual;
1422         unsigned long padr_sys, padr, fbsz_sys, fbsz;
1423         int cnt;
1424 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1425         int status;
1426 #endif
1427
1428         printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1429
1430 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1431         if (omapfb_nb) {
1432                 printk(KERN_WARNING
1433                        "omapdsp: frame buffer has been exported already!\n");
1434                 return -EBUSY;
1435         }
1436 #endif
1437
1438         if (num_registered_fb == 0) {
1439                 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1440                 return -EINVAL;
1441         }
1442         if (num_registered_fb != 1) {
1443                 printk(KERN_INFO
1444                        "omapdsp: %d frame buffers found. Using the first one.\n",
1445                        num_registered_fb);
1446         }
1447         padr_sys = registered_fb[0]->fix.smem_start;
1448         fbsz_sys = registered_fb[0]->fix.smem_len;
1449         if (fbsz_sys == 0) {
1450                 printk(KERN_ERR
1451                        "omapdsp: framebuffer doesn't seem to be configured "
1452                        "correctly! (size=0)\n");
1453                 return -EINVAL;
1454         }
1455
1456         /*
1457          * align padr and fbsz to 4kB boundary
1458          * (should be noted to the user afterwards!)
1459          */
1460         padr = padr_sys & ~(SZ_4KB-1);
1461         fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1462
1463         /* line up dspadr offset with padr */
1464         dspadr_actual =
1465                 (fbsz > SZ_1MB) ?  lineup_offset(*dspadr, padr, SZ_1MB-1) :
1466                 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1467                 /* (fbsz > SZ_4KB) ? */ *dspadr;
1468         if (dspadr_actual != *dspadr)
1469                 printk(KERN_DEBUG
1470                        "omapdsp: actual dspadr for FBEXPORT = %08x\n",
1471                        dspadr_actual);
1472         *dspadr = dspadr_actual;
1473
1474         cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1475         if (cnt < 0) {
1476                 printk(KERN_ERR "omapdsp: exmap failure.\n");
1477                 return cnt;
1478         }
1479
1480         if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1481                 printk(KERN_WARNING
1482 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1483 "  !!  screen base address or size is not aligned to 4kB:           !!\n"
1484 "  !!    actual screen  adr = %08lx, size = %08lx             !!\n"
1485 "  !!    exporting      adr = %08lx, size = %08lx             !!\n"
1486 "  !!  Make sure that the framebuffer is allocated with 4kB-order!  !!\n"
1487 "  !!  Otherwise DSP can corrupt the kernel memory.                 !!\n"
1488 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1489                        padr_sys, fbsz_sys, padr, fbsz);
1490         }
1491
1492 #ifdef CONFIG_ARCH_OMAP1
1493         /* increase the DMA priority */
1494         set_emiff_dma_prio(15);
1495 #endif
1496
1497 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1498         omapfb_nb = kzalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1499         if (omapfb_nb == NULL) {
1500                 printk(KERN_ERR
1501                        "omapdsp: failed to allocate memory for omapfb_nb!\n");
1502                 dsp_exunmap(dspadr_actual);
1503                 return -ENOMEM;
1504         }
1505         status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1506         if (!status)
1507                 printk("omapfb_register_client(): success\n");
1508         else
1509                 printk("omapfb_register_client(): failure(%d)\n", status);
1510 #endif
1511
1512         return cnt;
1513 }
1514
1515 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1516
1517 static int dsp_fbexport(dsp_long_t *dspadr)
1518 {
1519         printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1520         return -EINVAL;
1521 }
1522
1523 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1524
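/*
 * exmap_setup_preserved_mem_page(): create a preserved (locked) 4KB
 * mapping of a kernel page at a fixed DSP address; used for the DSP
 * vector page.
 */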
1525 static void exmap_setup_preserved_mem_page(void *buf, dsp_long_t dspadr,
1526                                            int exmap_idx)
1527 {
1528         unsigned long phys;
1529         void *virt;
1530         struct tlb_entry tlb_ent;
1531
1532         phys = __pa(buf);
1533         virt = dspbyte_to_virt(dspadr);
1534         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1535         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], buf, virt);
1536         INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1537         dsp_mmu_load_tlb(&tlb_ent);
1538 }
1539
1540 static void exmap_clear_mem_page(dsp_long_t dspadr)
1541 {
1542         void *virt;
1543
1544         virt = dspbyte_to_virt(dspadr);
1545         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1546         /* DSP MMU is shutting down. not handled here. */
1547 }
1548
1549 #ifdef CONFIG_ARCH_OMAP2
1550 static void exmap_setup_iomap_page(unsigned long phys, unsigned long dsp_io_adr,
1551                                    int exmap_idx)
1552 {
1553         dsp_long_t dspadr;
1554         void *virt;
1555         struct tlb_entry tlb_ent;
1556
1557         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1558         virt = dspbyte_to_virt(dspadr);
1559         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1560         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], NULL, virt);
1561         INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
1562         dsp_mmu_load_tlb(&tlb_ent);
1563 }
1564
1565 static void exmap_clear_iomap_page(unsigned long dsp_io_adr)
1566 {
1567         dsp_long_t dspadr;
1568         void *virt;
1569
1570         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1571         virt = dspbyte_to_virt(dspadr);
1572         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1573         /* DSP MMU is shutting down. not handled here. */
1574 }
1575 #endif /* CONFIG_ARCH_OMAP2 */
1576
1577 #define OMAP2420_GPT5_BASE      (L4_24XX_BASE + 0x7c000)
1578 #define OMAP2420_GPT6_BASE      (L4_24XX_BASE + 0x7e000)
1579 #define OMAP2420_GPT7_BASE      (L4_24XX_BASE + 0x80000)
1580 #define OMAP2420_GPT8_BASE      (L4_24XX_BASE + 0x82000)
1581 #define OMAP24XX_EAC_BASE       (L4_24XX_BASE + 0x90000)
1582
1583 static int exmap_setup_preserved_entries(void)
1584 {
1585         int n = 0;
1586
1587         exmap_setup_preserved_mem_page(dspvect_page, DSP_INIT_PAGE, n++);
1588 #ifdef CONFIG_ARCH_OMAP2
1589         exmap_setup_iomap_page(OMAP24XX_PRCM_BASE,     0x7000, n++);
1590 #ifdef CONFIG_ARCH_OMAP2420
1591         exmap_setup_iomap_page(OMAP2420_GPT5_BASE,     0xe000, n++);
1592         exmap_setup_iomap_page(OMAP2420_GPT6_BASE,     0xe800, n++);
1593         exmap_setup_iomap_page(OMAP2420_GPT7_BASE,     0xf000, n++);
1594         exmap_setup_iomap_page(OMAP2420_GPT8_BASE,     0xf800, n++);
1595 #endif /* CONFIG_ARCH_OMAP2420 */
1596         exmap_setup_iomap_page(OMAP24XX_EAC_BASE,     0x10000, n++);
1597         exmap_setup_iomap_page(OMAP24XX_MAILBOX_BASE, 0x11000, n++);
1598 #endif /* CONFIG_ARCH_OMAP2 */
1599
1600         return n;
1601 }
1602
1603 static void exmap_clear_preserved_entries(void)
1604 {
1605         exmap_clear_mem_page(DSP_INIT_PAGE);
1606 #ifdef CONFIG_ARCH_OMAP2
1607         exmap_clear_iomap_page(0x7000);         /* PRCM */
1608 #ifdef CONFIG_ARCH_OMAP2420
1609         exmap_clear_iomap_page(0xe000);         /* GPT5 */
1610         exmap_clear_iomap_page(0xe800);         /* GPT6 */
1611         exmap_clear_iomap_page(0xf000);         /* GPT7 */
1612         exmap_clear_iomap_page(0xf800);         /* GPT8 */
1613 #endif /* CONFIG_ARCH_OMAP2420 */
1614         exmap_clear_iomap_page(0x10000);        /* EAC */
1615         exmap_clear_iomap_page(0x11000);        /* MAILBOX */
1616 #endif /* CONFIG_ARCH_OMAP2 */
1617 }
1618
1619 #ifdef CONFIG_ARCH_OMAP1
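/*
 * dsp_mmu_itack():
 * acknowledge a pending DSP MMU fault interrupt -- temporarily map a
 * page at the faulting address, drop to the recovery runlevel, issue
 * the ack, then unmap the page and clear the MMU error.
 */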
1620 static int dsp_mmu_itack(void)
1621 {
1622         unsigned long dspadr;
1623
1624         printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1625         if (!dsp_err_isset(ERRCODE_MMU)) {
1626                 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1627                 return -EINVAL;
1628         }
1629         dspadr = dsp_fault_adr & ~(SZ_4K-1);
1630         dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM);    /* FIXME: reserve TLB entry for this */
1631         printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1632         dsp_set_runlevel(RUNLEVEL_RECOVERY);
1633         __dsp_mmu_itack();
1634         udelay(100);
1635         dsp_exunmap(dspadr);
1636         dsp_err_clear(ERRCODE_MMU);
1637         return 0;
1638 }
1639 #endif /* CONFIG_ARCH_OMAP1 */
1640
1641 #ifdef CONFIG_ARCH_OMAP2
1642 #define MMU_IRQ_MASK \
1643         (DSP_MMU_IRQ_MULTIHITFAULT | \
1644          DSP_MMU_IRQ_TABLEWALKFAULT | \
1645          DSP_MMU_IRQ_EMUMISS | \
1646          DSP_MMU_IRQ_TRANSLATIONFAULT | \
1647          DSP_MMU_IRQ_TLBMISS)
1648 #endif
1649
1650 static int is_mmu_init;
1651
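/*
 * dsp_mmu_init() / dsp_mmu_shutdown():
 * bring the DSP MMU up (reset/enable, initialize the TLB lock, install
 * the preserved entries and, on OMAP2, unmask the MMU interrupt
 * sources) and tear it down again.
 */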
1652 static void dsp_mmu_init(void)
1653 {
1654         struct tlb_lock tlb_lock;
1655
1656 #ifdef CONFIG_ARCH_OMAP1
1657         clk_enable(dsp_ck_handle);
1658         omap_dsp_request_mem();
1659 #endif
1660         down_write(&exmap_sem);
1661
1662 #if defined(CONFIG_ARCH_OMAP1)
1663         dsp_mmu_disable();      /* clear all */
1664         udelay(100);
1665 #elif defined(CONFIG_ARCH_OMAP2)
1666         dsp_mmu_reset();
1667 #endif
1668         dsp_mmu_enable();
1669
1670         /* DSP TLB initialization */
1671         tlb_lock.base   = 0;
1672         tlb_lock.victim = 0;
1673         set_tlb_lock(&tlb_lock);
1674
1675         exmap_preserved_cnt = exmap_setup_preserved_entries();
1676
1677 #ifdef CONFIG_ARCH_OMAP2
1678         /* MMU IRQ mask setup */
1679         dsp_mmu_write_reg(MMU_IRQ_MASK, DSP_MMU_IRQENABLE);
1680 #endif
1681
1682         up_write(&exmap_sem);
1683 #ifdef CONFIG_ARCH_OMAP1
1684         omap_dsp_release_mem();
1685         clk_disable(dsp_ck_handle);
1686 #endif
1687
1688         is_mmu_init = 1;
1689 }
1690
1691 static void dsp_mmu_shutdown(void)
1692 {
1693         if (is_mmu_init) {
1694                 exmap_flush();
1695                 exmap_clear_preserved_entries();
1696                 dsp_mmu_disable();
1697         }
1698 }
1699
1700 #ifdef CONFIG_ARCH_OMAP1
1701 /*
1702  * intmem_enable() / disable():
1703  * if the address is in DSP internal memories,
1704  * we send PM mailbox commands so that the DSP DMA domain won't go idle
1705  * while the ARM is accessing those memories.
1706  */
1707 static int intmem_enable(void)
1708 {
1709         int ret = 0;
1710
1711         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1712                 ret = mbcompose_send(PM, PM_ENABLE, DSPREG_ICR_DMA);
1713
1714         return ret;
1715 }
1716
1717 static void intmem_disable(void)
{
1718         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1719                 mbcompose_send(PM, PM_DISABLE, DSPREG_ICR_DMA);
1720 }
1721 #endif /* CONFIG_ARCH_OMAP1 */
1722
1723 /*
1724  * dsp_mem_enable() / disable()
1725  */
1726 #ifdef CONFIG_ARCH_OMAP1
1727 int intmem_usecount;
1728 #endif
1729
1730 int dsp_mem_enable(void *adr)
1731 {
1732         int ret = 0;
1733
1734         if (is_dsp_internal_mem(adr)) {
1735 #ifdef CONFIG_ARCH_OMAP1
1736                 if (intmem_usecount++ == 0)
1737                         ret = omap_dsp_request_mem();
1738 #endif
1739         } else
1740                 down_read(&exmap_sem);
1741
1742         return ret;
1743 }
1744
1745 void dsp_mem_disable(void *adr)
1746 {
1747         if (is_dsp_internal_mem(adr)) {
1748 #ifdef CONFIG_ARCH_OMAP1
1749                 if (--intmem_usecount == 0)
1750                         omap_dsp_release_mem();
1751 #endif
1752         } else
1753                 up_read(&exmap_sem);
1754 }
1755
1756 /* for safety */
1757 #ifdef CONFIG_ARCH_OMAP1
1758 void dsp_mem_usecount_clear(void)
1759 {
1760         if (intmem_usecount != 0) {
1761                 printk(KERN_WARNING
1762                        "omapdsp: unbalanced memory request/release detected.\n"
1763                        "         intmem_usecount should be zero here "
1764                        "but is not; forcing it to zero.\n");
1765                 intmem_usecount = 0;
1766                 omap_dsp_release_mem();
1767         }
1768 }
1769 #endif /* CONFIG_ARCH_OMAP1 */
1770
1771 /*
1772  * dsp_mem file operations
1773  */
1774 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1775 {
1776         loff_t ret;
1777
1778         mutex_lock(&file->f_dentry->d_inode->i_mutex);
1779         switch (orig) {
1780         case 0:
1781                 file->f_pos = offset;
1782                 ret = file->f_pos;
1783                 break;
1784         case 1:
1785                 file->f_pos += offset;
1786                 ret = file->f_pos;
1787                 break;
1788         default:
1789                 ret = -EINVAL;
1790         }
1791         mutex_unlock(&file->f_dentry->d_inode->i_mutex);
1792         return ret;
1793 }
1794
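/*
 * intmem_read() / intmem_write():
 * copy from / to DSP internal memory through its ARM-side mapping.
 * Accesses are clipped to dspmem_size; on OMAP1 the API clock is kept
 * running for the duration of the copy.
 */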
1795 static ssize_t intmem_read(struct file *file, char __user *buf, size_t count,
1796                            loff_t *ppos)
1797 {
1798         unsigned long p = *ppos;
1799         void *vadr = dspbyte_to_virt(p);
1800         ssize_t size = dspmem_size;
1801         ssize_t read;
1802
1803         if (p >= size)
1804                 return 0;
1805 #ifdef CONFIG_ARCH_OMAP1
1806         clk_enable(api_ck_handle);
1807 #endif
1808         read = count;
1809         if (count > size - p)
1810                 read = size - p;
1811         if (copy_to_user(buf, vadr, read)) {
1812                 read = -EFAULT;
1813                 goto out;
1814         }
1815         *ppos += read;
1816 out:
1817 #ifdef CONFIG_ARCH_OMAP1
1818         clk_disable(api_ck_handle);
1819 #endif
1820         return read;
1821 }
1822
1823 static ssize_t exmem_read(struct file *file, char __user *buf, size_t count,
1824                           loff_t *ppos)
1825 {
1826         unsigned long p = *ppos;
1827         void *vadr = dspbyte_to_virt(p);
1828
1829         if (!exmap_valid(vadr, count)) {
1830                 printk(KERN_ERR
1831                        "omapdsp: DSP address %08lx / size %08zx "
1832                        "is not valid!\n", p, count);
1833                 return -EFAULT;
1834         }
1835         if (count > DSPSPACE_SIZE - p)
1836                 count = DSPSPACE_SIZE - p;
1837         if (copy_to_user(buf, vadr, count))
1838                 return -EFAULT;
1839         *ppos += count;
1840
1841         return count;
1842 }
1843
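/*
 * dsp_mem_read() / dsp_mem_write():
 * enable access to the target area, then dispatch to the internal or
 * external memory handler depending on the file offset.
 */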
1844 static ssize_t dsp_mem_read(struct file *file, char __user *buf, size_t count,
1845                             loff_t *ppos)
1846 {
1847         int ret;
1848         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1849
1850         if (dsp_mem_enable(vadr) < 0)
1851                 return -EBUSY;
1852         if (is_dspbyte_internal_mem(*ppos))
1853                 ret = intmem_read(file, buf, count, ppos);
1854         else
1855                 ret = exmem_read(file, buf, count, ppos);
1856         dsp_mem_disable(vadr);
1857
1858         return ret;
1859 }
1860
1861 static ssize_t intmem_write(struct file *file, const char __user *buf,
1862                             size_t count, loff_t *ppos)
1863 {
1864         unsigned long p = *ppos;
1865         void *vadr = dspbyte_to_virt(p);
1866         ssize_t size = dspmem_size;
1867         ssize_t written;
1868
1869         if (p >= size)
1870                 return 0;
1871 #ifdef CONFIG_ARCH_OMAP1
1872         clk_enable(api_ck_handle);
1873 #endif
1874         written = count;
1875         if (count > size - p)
1876                 written = size - p;
1877         if (copy_from_user(vadr, buf, written)) {
1878                 written = -EFAULT;
1879                 goto out;
1880         }
1881         *ppos += written;
1882 out:
1883 #ifdef CONFIG_ARCH_OMAP1
1884         clk_disable(api_ck_handle);
1885 #endif
1886         return written;
1887 }
1888
1889 static ssize_t exmem_write(struct file *file, const char __user *buf,
1890                            size_t count, loff_t *ppos)
1891 {
1892         unsigned long p = *ppos;
1893         void *vadr = dspbyte_to_virt(p);
1894
1895         if (!exmap_valid(vadr, count)) {
1896                 printk(KERN_ERR
1897                        "omapdsp: DSP address %08lx / size %08zx "
1898                        "is not valid!\n", p, count);
1899                 return -EFAULT;
1900         }
1901         if (count > DSPSPACE_SIZE - p)
1902                 count = DSPSPACE_SIZE - p;
1903         if (copy_from_user(vadr, buf, count))
1904                 return -EFAULT;
1905         *ppos += count;
1906
1907         return count;
1908 }
1909
1910 static ssize_t dsp_mem_write(struct file *file, const char __user *buf,
1911                              size_t count, loff_t *ppos)
1912 {
1913         int ret;
1914         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1915
1916         if (dsp_mem_enable(vadr) < 0)
1917                 return -EBUSY;
1918         if (is_dspbyte_internal_mem(*ppos))
1919                 ret = intmem_write(file, buf, count, ppos);
1920         else
1921                 ret = exmem_write(file, buf, count, ppos);
1922         dsp_mem_disable(vadr);
1923
1924         return ret;
1925 }
1926
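/*
 * dsp_mem_ioctl():
 * MMU (re)initialization, exmap map/unmap/flush, frame buffer export,
 * kernel memory pool reserve/release and, on OMAP1, MMU interrupt ack.
 */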
1927 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1928                          unsigned int cmd, unsigned long arg)
1929 {
1930         switch (cmd) {
1931         case MEM_IOCTL_MMUINIT:
1932                 dsp_mmu_init();
1933                 return 0;
1934
1935         case MEM_IOCTL_EXMAP:
1936                 {
1937                         struct omap_dsp_mapinfo mapinfo;
1938                         if (copy_from_user(&mapinfo, (void __user *)arg,
1939                                            sizeof(mapinfo)))
1940                                 return -EFAULT;
1941                         return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1942                                          EXMAP_TYPE_MEM);
1943                 }
1944
1945         case MEM_IOCTL_EXUNMAP:
1946                 return dsp_exunmap((unsigned long)arg);
1947
1948         case MEM_IOCTL_EXMAP_FLUSH:
1949                 exmap_flush();
1950                 return 0;
1951
1952         case MEM_IOCTL_FBEXPORT:
1953                 {
1954                         dsp_long_t dspadr;
1955                         int ret;
1956                         if (copy_from_user(&dspadr, (void __user *)arg,
1957                                            sizeof(dsp_long_t)))
1958                                 return -EFAULT;
1959                         ret = dsp_fbexport(&dspadr);
1960                         if (copy_to_user((void __user *)arg, &dspadr,
1961                                          sizeof(dsp_long_t)))
1962                                 return -EFAULT;
1963                         return ret;
1964                 }
1965
1966 #ifdef CONFIG_ARCH_OMAP1
1967         case MEM_IOCTL_MMUITACK:
1968                 return dsp_mmu_itack();
1969 #endif
1970
1971         case MEM_IOCTL_KMEM_RESERVE:
1972                 {
1973                         __u32 size;
1974                         if (copy_from_user(&size, (void __user *)arg,
1975                                            sizeof(__u32)))
1976                                 return -EFAULT;
1977                         return dsp_kmem_reserve(size);
1978                 }
1979
1980         case MEM_IOCTL_KMEM_RELEASE:
1981                 dsp_kmem_release();
1982                 return 0;
1983
1984         default:
1985                 return -ENOIOCTLCMD;
1986         }
1987 }
1988
1989 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
1990 {
1991         /*
1992          * FIXME
1993          */
1994         return -ENOSYS;
1995 }
1996
1997 static int dsp_mem_open(struct inode *inode, struct file *file)
1998 {
1999         if (!capable(CAP_SYS_RAWIO))
2000                 return -EPERM;
2001
2002         return 0;
2003 }
2004
2005 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
2006 /*
2007  * fb update functions:
2008  * fbupd_response() is executed by the workqueue.
2009  * fbupd_cb() is called when fb update is done, in interrupt context.
2010  * mbox_fbctl_upd() is called when KFUNC:FBCTL:UPD is received from the DSP.
2011  */
2012 static void fbupd_response(struct work_struct *unused)
2013 {
2014         int status;
2015
2016         status = mbcompose_send(KFUNC, KFUNC_FBCTL, FBCTL_UPD);
2017         if (status < 0) {
2018                 /* FIXME: DSP is busy !! */
2019                 printk(KERN_ERR
2020                        "omapdsp: DSP is busy when trying to send FBCTL:UPD "
2021                        "response!\n");
2022         }
2023 }
2024
2025 static DECLARE_WORK(fbupd_response_work, fbupd_response);
2026
2027 static void fbupd_cb(void *arg)
2028 {
2029         schedule_work(&fbupd_response_work);
2030 }
2031
2032 void mbox_fbctl_upd(void)
2033 {
2034         struct omapfb_update_window win;
2035         volatile unsigned short *buf = ipbuf_sys_da->d;
2036
2037         /* FIXME: try count sometimes exceeds 1000. */
2038         if (sync_with_dsp(&ipbuf_sys_da->s, TID_ANON, 5000) < 0) {
2039                 printk(KERN_ERR "mbox: FBCTL:UPD - IPBUF sync failed!\n");
2040                 return;
2041         }
2042         win.x = buf[0];
2043         win.y = buf[1];
2044         win.width = buf[2];
2045         win.height = buf[3];
2046         win.format = buf[4];
2047         release_ipbuf_pvt(ipbuf_sys_da);
2048
2049         if (!omapfb_ready) {
2050                 printk(KERN_WARNING
2051                        "omapdsp: mbox_fbctl_upd() called while HWA742 is not ready!\n");
2052                 return;
2053         }
2054         //printk("calling omapfb_update_window_async()\n");
2055         omapfb_update_window_async(registered_fb[0], &win, fbupd_cb, NULL);
2056 }
2057
2058 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
2059
2060 void mbox_fbctl_upd(void)
2061 {
2062 }
2063 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
2064
2065 /*
2066  * sysfs files
2067  */
2068
2069 /* mmu */
2070 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
2071                         char *buf)
2072 {
2073         int len;
2074         struct tlb_lock tlb_lock_org;
2075         int i;
2076
2077 #ifdef CONFIG_ARCH_OMAP1
2078         clk_enable(dsp_ck_handle);
2079         omap_dsp_request_mem();
2080 #endif
2081         down_read(&exmap_sem);
2082
2083         get_tlb_lock(&tlb_lock_org);
2084
2085 #if defined(CONFIG_ARCH_OMAP1)
2086         len = sprintf(buf, "P: preserved, V: valid\n"
2087                            "ety P V size   cam_va     ram_pa ap\n");
2088                          /* 00: P V  4KB 0x300000 0x10171800 FA */
2089 #elif defined(CONFIG_ARCH_OMAP2)
2090         len = sprintf(buf, "P: preserved, V: valid\n"
2091                            "B: big endian, L: little endian, "
2092                            "M: mixed page attribute\n"
2093                            "ety P V size   cam_va     ram_pa E ES M\n");
2094                          /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
2095 #endif
2096
2097         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2098                 struct cam_ram_regset cr;
2099                 struct tlb_lock tlb_lock_tmp;
2100                 struct tlb_entry ent;
2101 #if defined(CONFIG_ARCH_OMAP1)
2102                 char *pgsz_str, *ap_str;
2103 #elif defined(CONFIG_ARCH_OMAP2)
2104                 char *pgsz_str, *elsz_str;
2105 #endif
2106
2107                 /* read a TLB entry */
2108                 tlb_lock_tmp.base   = tlb_lock_org.base;
2109                 tlb_lock_tmp.victim = i;
2110                 __read_tlb(&tlb_lock_tmp, &cr);
2111
2112 #if defined(CONFIG_ARCH_OMAP1)
2113                 ent.pgsz  = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
2114                 ent.prsvd = cr.cam_l & DSP_MMU_CAM_P;
2115                 ent.valid = cr.cam_l & DSP_MMU_CAM_V;
2116                 ent.ap    = cr.ram_l & DSP_MMU_RAM_L_AP_MASK;
2117                 ent.va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
2118                          (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
2119                 ent.pa = (unsigned long)cr.ram_h << 16 |
2120                          (cr.ram_l & DSP_MMU_RAM_L_RAM_LSB_MASK);
2121
2122                 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
2123                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2124                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
2125                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB":
2126                                                                      " ???";
2127                 ap_str = (ent.ap == DSP_MMU_RAM_L_AP_RO) ? "RO":
2128                          (ent.ap == DSP_MMU_RAM_L_AP_FA) ? "FA":
2129                          (ent.ap == DSP_MMU_RAM_L_AP_NA) ? "NA":
2130                                                            "??";
2131 #elif defined(CONFIG_ARCH_OMAP2)
2132                 ent.pgsz   = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
2133                 ent.prsvd  = cr.cam & DSP_MMU_CAM_P;
2134                 ent.valid  = cr.cam & DSP_MMU_CAM_V;
2135                 ent.va     = cr.cam & DSP_MMU_CAM_VATAG_MASK;
2136                 ent.endian = cr.ram & DSP_MMU_RAM_ENDIANNESS;
2137                 ent.elsz   = cr.ram & DSP_MMU_RAM_ELEMENTSIZE_MASK;
2138                 ent.pa     = cr.ram & DSP_MMU_RAM_PADDR_MASK;
2139                 ent.mixed  = cr.ram & DSP_MMU_RAM_MIXED;
2140
2141                 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
2142                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
2143                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2144                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
2145                                                                      " ???";
2146                 elsz_str = (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
2147                            (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_16) ? "16":
2148                            (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_32) ? "32":
2149                                                                       "??";
2150 #endif
2151
2152                 if (i == tlb_lock_org.base)
2153                         len += sprintf(buf + len, "lock base = %d\n",
2154                                        tlb_lock_org.base);
2155                 if (i == tlb_lock_org.victim)
2156                         len += sprintf(buf + len, "victim    = %d\n",
2157                                        tlb_lock_org.victim);
2158 #if defined(CONFIG_ARCH_OMAP1)
2159                 len += sprintf(buf + len,
2160                                /* 00: P V  4KB 0x300000 0x10171800 FA */
2161                                "%02d: %c %c %s 0x%06x 0x%08lx %s\n",
2162                                i,
2163                                ent.prsvd ? 'P' : ' ',
2164                                ent.valid ? 'V' : ' ',
2165                                pgsz_str, ent.va, ent.pa, ap_str);
2166 #elif defined(CONFIG_ARCH_OMAP2)
2167                 len += sprintf(buf + len,
2168                                /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
2169                                "%02d: %c %c %s 0x%06x 0x%08lx %c %s %c\n",
2170                                i,
2171                                ent.prsvd ? 'P' : ' ',
2172                                ent.valid ? 'V' : ' ',
2173                                pgsz_str, ent.va, ent.pa,
2174                                ent.endian ? 'B' : 'L',
2175                                elsz_str,
2176                                ent.mixed ? 'M' : ' ');
2177 #endif /* CONFIG_ARCH_OMAP2 */
2178         }
2179
2180         /* restore victim entry */
2181         set_tlb_lock(&tlb_lock_org);
2182
2183         up_read(&exmap_sem);
2184 #ifdef CONFIG_ARCH_OMAP1
2185         omap_dsp_release_mem();
2186         clk_disable(dsp_ck_handle);
2187 #endif
2188         return len;
2189 }
2190
2191 /* exmap */
2192 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
2193                           char *buf)
2194 {
2195         int len;
2196         int i;
2197
2198         down_read(&exmap_sem);
2199         len = sprintf(buf, "  dspadr     size         buf     size uc\n");
2200                          /* 0x300000 0x123000  0xc0171000 0x100000  0*/
2201         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2202                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
2203                 void *vadr;
2204                 unsigned long size;
2205                 enum exmap_type_e type;
2206                 int idx;
2207
2208                 /* find a top of link */
2209                 if (!ent->valid || (ent->link.prev >= 0))
2210                         continue;
2211
2212                 vadr = ent->vadr;
2213                 type = ent->type;
2214                 size = 0;
2215                 idx = i;
2216                 do {
2217                         ent = &exmap_tbl[idx];
2218                         size += PAGE_SIZE << ent->order;
2219                 } while ((idx = ent->link.next) >= 0);
2220
2221                 len += sprintf(buf + len, "0x%06x %#8lx",
2222                                virt_to_dspbyte(vadr), size);
2223
2224                 if (type == EXMAP_TYPE_FB) {
2225                         len += sprintf(buf + len, "    framebuf\n");
2226                 } else {
2227                         len += sprintf(buf + len, "\n");
2228                         idx = i;
2229                         do {
2230                                 ent = &exmap_tbl[idx];
2231                                 len += sprintf(buf + len,
2232                                                /* 0xc0171000 0x100000  0*/
2233                                                "%19s0x%8p %#8lx %2d\n",
2234                                                "", ent->buf,
2235                                                PAGE_SIZE << ent->order,
2236                                                ent->usecount);
2237                         } while ((idx = ent->link.next) >= 0);
2238                 }
2239         }
2240
2241         up_read(&exmap_sem);
2242         return len;
2243 }
2244
2245 /* mempool */
2246 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
2247                             char *buf)
2248 {
2249         int min_nr_1M = 0, curr_nr_1M = 0;
2250         int min_nr_64K = 0, curr_nr_64K = 0;
2251         int total = 0;
2252
2253         if (likely(kmem_pool_1M)) {
2254                 min_nr_1M  = kmem_pool_1M->min_nr;
2255                 curr_nr_1M = kmem_pool_1M->curr_nr;
2256                 total += min_nr_1M * SZ_1MB;
2257         }
2258         if (likely(kmem_pool_64K)) {
2259                 min_nr_64K  = kmem_pool_64K->min_nr;
2260                 curr_nr_64K = kmem_pool_64K->curr_nr;
2261                 total += min_nr_64K * SZ_64KB;
2262         }
2263
2264         return sprintf(buf,
2265                        "0x%x\n"
2266                        "1M  buffer: %d (%d free)\n"
2267                        "64K buffer: %d (%d free)\n",
2268                        total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
2269 }
2270
2271 /*
2272  * workqueue for DSP MMU interrupt handling
2273  */
2274 #ifdef CONFIG_ARCH_OMAP1
2275 /*
2276  * MMU fault mask:
2277  * We ignore prefetch err.
2278  */
2279 #define MMUFAULT_MASK \
2280         (DSP_MMU_FAULT_ST_PERM |\
2281          DSP_MMU_FAULT_ST_TLB_MISS |\
2282          DSP_MMU_FAULT_ST_TRANS)
2283 #endif /* CONFIG_ARCH_OMAP1 */
2284
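/*
 * do_mmu_int():
 * bottom half of the DSP MMU fault interrupt -- read the fault status
 * and address, log the cause, then either record an MMU error (when
 * the DSP configuration is ready) or reset the DSP.  The interrupt is
 * re-enabled on exit.
 */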
2285 static void do_mmu_int(struct work_struct *unused)
2286 {
2287 #if defined(CONFIG_ARCH_OMAP1)
2288
2289         dsp_mmu_reg_t status;
2290         dsp_mmu_reg_t adh, adl;
2291         dsp_mmu_reg_t dp;
2292
2293         status = dsp_mmu_read_reg(DSP_MMU_FAULT_ST);
2294         adh = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_H);
2295         adl = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_L);
2296         dp = adh & DSP_MMU_FAULT_AD_H_DP;
2297         dsp_fault_adr = MK32(adh & DSP_MMU_FAULT_AD_H_ADR_MASK, adl);
2298
2299         /* if the fault is masked, nothing to do */
2300         if ((status & MMUFAULT_MASK) == 0) {
2301                 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
2302                 /*
2303                  * note: on OMAP1710,
2304                  * when the CACHE + DMA domain comes out of idle in the DSP,
2305                  * an MMU interrupt occurs but DSP_MMU_FAULT_ST is not set.
2306                  * In this case, we just ignore the interrupt.
2307                  */
2308                 if (status) {
2309                         printk(KERN_DEBUG "%s%s%s%s\n",
2310                                (status & DSP_MMU_FAULT_ST_PREF)?
2311                                         "  (prefetch err)" : "",
2312                                (status & DSP_MMU_FAULT_ST_PERM)?
2313                                         "  (permission fault)" : "",
2314                                (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2315                                         "  (TLB miss)" : "",
2316                                (status & DSP_MMU_FAULT_ST_TRANS) ?
2317                                         "  (translation fault)": "");
2318                         printk(KERN_DEBUG "fault address = %#08x\n",
2319                                dsp_fault_adr);
2320                 }
2321                 enable_irq(omap_dsp->mmu_irq);
2322                 return;
2323         }
2324
2325 #elif defined(CONFIG_ARCH_OMAP2)
2326
2327         dsp_mmu_reg_t status;
2328
2329         status = dsp_mmu_read_reg(DSP_MMU_IRQSTATUS);
2330         dsp_fault_adr = dsp_mmu_read_reg(DSP_MMU_FAULT_AD);
2331
2332 #endif /* CONFIG_ARCH_OMAP2 */
2333
2334         printk(KERN_INFO "DSP MMU interrupt!\n");
2335
2336 #if defined(CONFIG_ARCH_OMAP1)
2337
2338         printk(KERN_INFO "%s%s%s%s\n",
2339                (status & DSP_MMU_FAULT_ST_PREF)?
2340                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PREF)?
2341                                 "  prefetch err":
2342                                 "  (prefetch err)":
2343                                 "",
2344                (status & DSP_MMU_FAULT_ST_PERM)?
2345                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PERM)?
2346                                 "  permission fault":
2347                                 "  (permission fault)":
2348                                 "",
2349                (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2350                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TLB_MISS)?
2351                                 "  TLB miss":
2352                                 "  (TLB miss)":
2353                                 "",
2354                (status & DSP_MMU_FAULT_ST_TRANS)?
2355                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TRANS)?
2356                                 "  translation fault":
2357                                 "  (translation fault)":
2358                                 "");
2359
2360 #elif defined(CONFIG_ARCH_OMAP2)
2361
2362         printk(KERN_INFO "%s%s%s%s%s\n",
2363                (status & DSP_MMU_IRQ_MULTIHITFAULT)?
2364                         (MMU_IRQ_MASK & DSP_MMU_IRQ_MULTIHITFAULT)?
2365                                 "  multi hit":
2366                                 "  (multi hit)":
2367                                 "",
2368                (status & DSP_MMU_IRQ_TABLEWALKFAULT)?
2369                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TABLEWALKFAULT)?
2370                                 "  table walk fault":
2371                                 "  (table walk fault)":
2372                                 "",
2373                (status & DSP_MMU_IRQ_EMUMISS)?
2374                         (MMU_IRQ_MASK & DSP_MMU_IRQ_EMUMISS)?
2375                                 "  EMU miss":
2376                                 "  (EMU miss)":
2377                                 "",
2378                (status & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2379                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2380                                 "  translation fault":
2381                                 "  (translation fault)":
2382                                 "",
2383                (status & DSP_MMU_IRQ_TLBMISS)?
2384                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TLBMISS)?
2385                                 "  TLB miss":
2386                                 "  (TLB miss)":
2387                                 "");
2388
2389 #endif /* CONFIG_ARCH_OMAP2 */
2390
2391         printk(KERN_INFO "fault address = %#08x\n", dsp_fault_adr);
2392
2393         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
2394                 dsp_err_set(ERRCODE_MMU, (unsigned long)dsp_fault_adr);
2395         else {
2396 #ifdef CONFIG_ARCH_OMAP1
2397                 __dsp_mmu_itack();
2398 #endif
2399                 printk(KERN_INFO "Resetting DSP...\n");
2400                 dsp_cpustat_request(CPUSTAT_RESET);
2401                 /*
2402                  * if we enable the following, the semaphore lock should be avoided.
2403                  *
2404                 printk(KERN_INFO "Flushing DSP MMU...\n");
2405                 exmap_flush();
2406                 dsp_mmu_init();
2407                  */
2408         }
2409
2410 #ifdef CONFIG_ARCH_OMAP2
2411         dsp_mmu_disable();
2412         dsp_mmu_write_reg(status, DSP_MMU_IRQSTATUS);
2413         dsp_mmu_enable();
2414 #endif
2415
2416         enable_irq(omap_dsp->mmu_irq);
2417 }
2418
2419 static DECLARE_WORK(mmu_int_work, do_mmu_int);
2420
2421 /*
2422  * DSP MMU interrupt handler
2423  */
2424
2425 static irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id)
2426 {
2427         disable_irq(omap_dsp->mmu_irq);
2428         schedule_work(&mmu_int_work);
2429         return IRQ_HANDLED;
2430 }
2431
2432 /*
2433  * dsp_mem device file operations
2434  */
2435 struct file_operations dsp_mem_fops = {
2436         .owner   = THIS_MODULE,
2437         .llseek  = dsp_mem_lseek,
2438         .read    = dsp_mem_read,
2439         .write   = dsp_mem_write,
2440         .ioctl   = dsp_mem_ioctl,
2441         .mmap    = dsp_mem_mmap,
2442         .open    = dsp_mem_open,
2443 };
2444
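/*
 * dsp_mem_start() / dsp_mem_stop():
 * register / unregister the internal memory enable/disable callbacks
 * (OMAP1); dsp_mem_stop() also clears the mem_sync state.
 */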
2445 void dsp_mem_start(void)
2446 {
2447 #ifdef CONFIG_ARCH_OMAP1
2448         dsp_register_mem_cb(intmem_enable, intmem_disable);
2449 #endif
2450 }
2451
2452 void dsp_mem_stop(void)
2453 {
2454         memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
2455 #ifdef CONFIG_ARCH_OMAP1
2456         dsp_unregister_mem_cb();
2457 #endif
2458 }
2459
2460 /*
2461  * latter half of DSP memory initialization
2462  */
2463 void dsp_mem_late_init(void)
2464 {
2465 #ifdef CONFIG_ARCH_OMAP2
2466         int i;
2467         int dspmem_pg_count;
2468
2469         dspmem_pg_count = dspmem_size >> 12;
2470         for (i = 0; i < dspmem_pg_count; i++) {
2471                 dsp_ipi_write_reg(i, DSP_IPI_INDEX);
2472                 dsp_ipi_write_reg(DSP_IPI_ENTRY_ELMSIZEVALUE_16,
2473                                   DSP_IPI_ENTRY);
2474         }
2475         dsp_ipi_write_reg(1, DSP_IPI_ENABLE);
2476         dsp_ipi_write_reg(IOMAP_VAL, DSP_IPI_IOMAP);
2477 #endif
2478         dsp_mmu_init();
2479 }
2480
2481 static char devid_mmu;
2482
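/*
 * dsp_mem_init():
 * allocate the DSP vector page, register the MMU fault interrupt (kept
 * disabled until the DSP runs) and create the mmu/exmap/mempool sysfs
 * files.
 */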
2483 int __init dsp_mem_init(void)
2484 {
2485         int i, ret;
2486
2487         for (i = 0; i < DSP_MMU_TLB_LINES; i++)
2488                 exmap_tbl[i].valid = 0;
2489
2490         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
2491         if (dspvect_page == NULL) {
2492                 printk(KERN_ERR
2493                        "omapdsp: failed to allocate memory "
2494                        "for dsp vector table\n");
2495                 return -ENOMEM;
2496         }
2497
2498         /*
2499          * DSP MMU interrupt setup
2500          */
2501         ret = request_irq(omap_dsp->mmu_irq, dsp_mmu_interrupt, IRQF_DISABLED,
2502                           "dsp_mmu",  &devid_mmu);
2503         if (ret) {
2504                 printk(KERN_ERR
2505                        "failed to register DSP MMU interrupt: %d\n", ret);
2506                 return ret;
2507         }
2508
2509         /* MMU interrupt is not enabled until DSP runs */
2510         disable_irq(omap_dsp->mmu_irq);
2511
2512         ret = device_create_file(omap_dsp->dev, &dev_attr_mmu);
2513         ret |= device_create_file(omap_dsp->dev, &dev_attr_exmap);
2514         ret |= device_create_file(omap_dsp->dev, &dev_attr_mempool);
2515         if (ret)
2516                 printk(KERN_ERR "device_create_file failed: %d\n", ret);
2517
2518         return 0;
2519 }
2520
2521 void dsp_mem_exit(void)
2522 {
2523         free_irq(omap_dsp->mmu_irq, &devid_mmu);
2524
2525         /* recover disable_depth */
2526         enable_irq(omap_dsp->mmu_irq);
2527
2528 #ifdef CONFIG_ARCH_OMAP1
2529         dsp_reset_idle_boot_base();
2530 #endif
2531         dsp_mmu_shutdown();
2532         dsp_kmem_release();
2533
2534         if (dspvect_page != NULL) {
2535                 free_page((unsigned long)dspvect_page);
2536                 dspvect_page = NULL;
2537         }
2538
2539         device_remove_file(omap_dsp->dev, &dev_attr_mmu);
2540         device_remove_file(omap_dsp->dev, &dev_attr_exmap);
2541         device_remove_file(omap_dsp->dev, &dev_attr_mempool);
2542 }