[linux-2.6-omap-h63xx.git] arch/arm/plat-omap/dsp/dsp_mem.c
1 /*
2  * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
3  *
4  * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
5  *
6  * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
7  *
8  * Conversion to mempool API and ARM MMU section mapping
9  * by Paul Mundt <paul.mundt@nokia.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * version 2 as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23  * 02110-1301 USA
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/fs.h>
30 #include <linux/fb.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/mempool.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <asm/uaccess.h>
37 #include <asm/io.h>
38 #include <asm/irq.h>
39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h>
41 #include <asm/arch/tc.h>
42 #include <asm/arch/omapfb.h>
43 #include <asm/arch/mailbox.h>
44 #include <asm/arch/dsp_common.h>
45 #include "uaccess_dsp.h"
46 #include "dsp_mbcmd.h"
47 #include "dsp.h"
48 #include "ioctl.h"
49 #include "ipbuf.h"
50
51 #ifdef CONFIG_ARCH_OMAP2
52 #define IOMAP_VAL       0x3f
53 #endif
54
55 #define SZ_1KB  0x400
56 #define SZ_4KB  0x1000
57 #define SZ_64KB 0x10000
58 #define SZ_1MB  0x100000
59 #define SZ_16MB 0x1000000
60 #define is_aligned(adr,align)   (!((adr)&((align)-1)))
61 #define ORDER_4KB       (12 - PAGE_SHIFT)
62 #define ORDER_64KB      (16 - PAGE_SHIFT)
63 #define ORDER_1MB       (20 - PAGE_SHIFT)
64
65 /*
66  * absorb DSP MMU register size and location difference
67  */
68 #if defined(CONFIG_ARCH_OMAP1)
69 typedef u16 dsp_mmu_reg_t;
70 #define dsp_mmu_read_reg(a)     omap_readw(a)
71 #define dsp_mmu_write_reg(v,a)  omap_writew(v,a)
72 #elif defined(CONFIG_ARCH_OMAP2)
73 typedef u32 dsp_mmu_reg_t;
74 #define dsp_mmu_read_reg(a)     readl(a)
75 #define dsp_mmu_write_reg(v,a)  writel(v,a)
76 #define dsp_ipi_read_reg(a)     readl(a)
77 #define dsp_ipi_write_reg(v,a)  writel(v,a)
78 #endif
79
80 #if defined(CONFIG_ARCH_OMAP1)
81
82 #define dsp_mmu_enable() \
83         do { \
84                 dsp_mmu_write_reg(DSP_MMU_CNTL_MMU_EN | DSP_MMU_CNTL_RESET_SW, \
85                                   DSP_MMU_CNTL); \
86         } while(0)
87 #define dsp_mmu_disable() \
88         do { \
89                 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
90         } while(0)
91 #define __dsp_mmu_itack() \
92         do { \
93                 dsp_mmu_write_reg(DSP_MMU_IT_ACK_IT_ACK, DSP_MMU_IT_ACK); \
94         } while(0)
95
96 #elif defined(CONFIG_ARCH_OMAP2)
97
98 #define dsp_mmu_enable() \
99         do { \
100                 dsp_mmu_write_reg(DSP_MMU_CNTL_MMUENABLE, DSP_MMU_CNTL); \
101         } while(0)
102 #define dsp_mmu_disable() \
103         do { \
104                 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
105         } while(0)
106 #define dsp_mmu_reset() \
107         do { \
108                 dsp_mmu_write_reg(dsp_mmu_read_reg(DSP_MMU_SYSCONFIG) | \
109                                   DSP_MMU_SYSCONFIG_SOFTRESET, \
110                                   DSP_MMU_SYSCONFIG); \
111         } while(0)
112
113 #endif /* CONFIG_ARCH_OMAP2 */
114
115 #define dsp_mmu_flush() \
116         do { \
117                 dsp_mmu_write_reg(DSP_MMU_FLUSH_ENTRY_FLUSH_ENTRY, \
118                                   DSP_MMU_FLUSH_ENTRY); \
119         } while(0)
120 #define __dsp_mmu_gflush() \
121         do { \
122                 dsp_mmu_write_reg(DSP_MMU_GFLUSH_GFLUSH, DSP_MMU_GFLUSH); \
123         } while(0)
124
125 /*
126  * absorb register name difference
127  */
128 #ifdef CONFIG_ARCH_OMAP1
129 #define DSP_MMU_CAM_P                   DSP_MMU_CAM_L_P
130 #define DSP_MMU_CAM_V                   DSP_MMU_CAM_L_V
131 #define DSP_MMU_CAM_PAGESIZE_MASK       DSP_MMU_CAM_L_PAGESIZE_MASK
132 #define DSP_MMU_CAM_PAGESIZE_1MB        DSP_MMU_CAM_L_PAGESIZE_1MB
133 #define DSP_MMU_CAM_PAGESIZE_64KB       DSP_MMU_CAM_L_PAGESIZE_64KB
134 #define DSP_MMU_CAM_PAGESIZE_4KB        DSP_MMU_CAM_L_PAGESIZE_4KB
135 #define DSP_MMU_CAM_PAGESIZE_1KB        DSP_MMU_CAM_L_PAGESIZE_1KB
136 #endif /* CONFIG_ARCH_OMAP1 */
137
138 /*
139  * OMAP1 EMIFF access
140  */
141 #ifdef CONFIG_ARCH_OMAP1
142 #define EMIF_PRIO_LB_MASK       0x0000f000
143 #define EMIF_PRIO_LB_SHIFT      12
144 #define EMIF_PRIO_DMA_MASK      0x00000f00
145 #define EMIF_PRIO_DMA_SHIFT     8
146 #define EMIF_PRIO_DSP_MASK      0x00000070
147 #define EMIF_PRIO_DSP_SHIFT     4
148 #define EMIF_PRIO_MPU_MASK      0x00000007
149 #define EMIF_PRIO_MPU_SHIFT     0
150 #define set_emiff_dma_prio(prio) \
151         do { \
152                 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
153                              ~EMIF_PRIO_DMA_MASK) | \
154                             ((prio) << EMIF_PRIO_DMA_SHIFT), \
155                             OMAP_TC_OCPT1_PRIOR); \
156         } while(0)
157 #endif /* CONFIG_ARCH_OMAP1 */
158
159 enum exmap_type_e {
160         EXMAP_TYPE_MEM,
161         EXMAP_TYPE_FB
162 };
163
164 struct exmap_tbl_entry {
165         unsigned int valid:1;
166         unsigned int prsvd:1;   /* preserved */
167         int usecount;           /* reference count by mmap */
168         enum exmap_type_e type;
169         void *buf;              /* virtual address of the buffer,
170                                  * i.e. 0xc0000000 - */
171         void *vadr;             /* DSP shadow space,
172                                  * i.e. 0xe0000000 - 0xe0ffffff */
173         unsigned int order;
174         struct {
175                 int prev;
176                 int next;
177         } link;                 /* grouping */
178 };
179
180 #define INIT_EXMAP_TBL_ENTRY(ent,b,v,typ,od) \
181         do {\
182                 (ent)->buf       = (b); \
183                 (ent)->vadr      = (v); \
184                 (ent)->valid     = 1; \
185                 (ent)->prsvd     = 0; \
186                 (ent)->usecount  = 0; \
187                 (ent)->type      = (typ); \
188                 (ent)->order     = (od); \
189                 (ent)->link.next = -1; \
190                 (ent)->link.prev = -1; \
191         } while (0)
192
193 #define INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(ent,b,v) \
194         do {\
195                 (ent)->buf       = (b); \
196                 (ent)->vadr      = (v); \
197                 (ent)->valid     = 1; \
198                 (ent)->prsvd     = 1; \
199                 (ent)->usecount  = 0; \
200                 (ent)->type      = EXMAP_TYPE_MEM; \
201                 (ent)->order     = 0; \
202                 (ent)->link.next = -1; \
203                 (ent)->link.prev = -1; \
204         } while (0)
205
206 #define DSP_MMU_TLB_LINES       32
207 static struct exmap_tbl_entry exmap_tbl[DSP_MMU_TLB_LINES];
208 static int exmap_preserved_cnt;
209 static DECLARE_RWSEM(exmap_sem);
210
211 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
212 static struct omapfb_notifier_block *omapfb_nb;
213 static int omapfb_ready;
214 #endif
215
216 struct cam_ram_regset {
217 #if defined(CONFIG_ARCH_OMAP1)
218         dsp_mmu_reg_t cam_h;
219         dsp_mmu_reg_t cam_l;
220         dsp_mmu_reg_t ram_h;
221         dsp_mmu_reg_t ram_l;
222 #elif defined(CONFIG_ARCH_OMAP2)
223         dsp_mmu_reg_t cam;
224         dsp_mmu_reg_t ram;
225 #endif
226 };
227
228 struct tlb_entry {
229         dsp_long_t va;
230         unsigned long pa;
231         dsp_mmu_reg_t pgsz, prsvd, valid;
232 #if defined(CONFIG_ARCH_OMAP1)
233         dsp_mmu_reg_t ap;
234 #elif defined(CONFIG_ARCH_OMAP2)
235         dsp_mmu_reg_t endian, elsz, mixed;
236 #endif
237 };
238
239 #if defined(CONFIG_ARCH_OMAP1)
240 #define INIT_TLB_ENTRY(ent,v,p,ps) \
241         do { \
242                 (ent)->va = (v); \
243                 (ent)->pa = (p); \
244                 (ent)->pgsz = (ps); \
245                 (ent)->prsvd = 0; \
246                 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
247         } while (0)
248 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
249         do { \
250                 (ent)->va = (v); \
251                 (ent)->pa = (p); \
252                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
253                 (ent)->prsvd = DSP_MMU_CAM_P; \
254                 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
255         } while (0)
256 #elif defined(CONFIG_ARCH_OMAP2)
257 #define INIT_TLB_ENTRY(ent,v,p,ps) \
258         do { \
259                 (ent)->va = (v); \
260                 (ent)->pa = (p); \
261                 (ent)->pgsz = (ps); \
262                 (ent)->prsvd = 0; \
263                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
264                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
265                 (ent)->mixed = 0; \
266         } while (0)
267 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
268         do { \
269                 (ent)->va = (v); \
270                 (ent)->pa = (p); \
271                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
272                 (ent)->prsvd = DSP_MMU_CAM_P; \
273                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
274                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
275                 (ent)->mixed = 0; \
276         } while (0)
277 #define INIT_TLB_ENTRY_4KB_ES32_PRESERVED(ent,v,p) \
278         do { \
279                 (ent)->va = (v); \
280                 (ent)->pa = (p); \
281                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
282                 (ent)->prsvd = DSP_MMU_CAM_P; \
283                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
284                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_32; \
285                 (ent)->mixed = 0; \
286         } while (0)
287 #endif
288
289 #if defined(CONFIG_ARCH_OMAP1)
290 #define cam_ram_valid(cr)       ((cr).cam_l & DSP_MMU_CAM_V)
291 #elif defined(CONFIG_ARCH_OMAP2)
292 #define cam_ram_valid(cr)       ((cr).cam & DSP_MMU_CAM_V)
293 #endif
294
295 struct tlb_lock {
296         int base;
297         int victim;
298 };
299
300 static int dsp_exunmap(dsp_long_t dspadr);
301
302 static void *dspvect_page;
303 static u32 dsp_fault_adr;
304 static struct mem_sync_struct mem_sync;
305
306 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
307                         char *buf);
308 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
309                           char *buf);
310 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
311                             char *buf);
312
313 static struct device_attribute dev_attr_mmu =     __ATTR_RO(mmu);
314 static struct device_attribute dev_attr_exmap =   __ATTR_RO(exmap);
315 static struct device_attribute dev_attr_mempool = __ATTR_RO(mempool);
316
317 /*
318  * special mempool function:
319  * hope this goes to mm/mempool.c
320  */
321 static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
322 {
323         unsigned long flags;
324
325         spin_lock_irqsave(&pool->lock, flags);
326         if (likely(pool->curr_nr)) {
327                 void *element = pool->elements[--pool->curr_nr];
328                 spin_unlock_irqrestore(&pool->lock, flags);
329                 return element;
330         }
331         spin_unlock_irqrestore(&pool->lock, flags);
332
333         return mempool_alloc(pool, gfp_mask);
334 }
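/*
 * Illustrative note: plain mempool_alloc() tries the underlying allocator
 * first and only falls back to the reserved elements, whereas this helper
 * prefers a reserved element when one is available.  A typical pairing
 * with the pools defined below would be:
 *
 *	buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
 *	...
 *	mempool_free(buf, kmem_pool_1M);
 */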
335
336 static __inline__ unsigned long lineup_offset(unsigned long adr,
337                                               unsigned long ref,
338                                               unsigned long mask)
339 {
340         unsigned long newadr;
341
342         newadr = (adr & ~mask) | (ref & mask);
343         if (newadr < adr)
344                 newadr += mask + 1;
345         return newadr;
346 }
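/*
 * Worked example (illustrative): with mask = SZ_1MB - 1, adr = 0x00234000
 * and ref = 0x10080000, the result is 0x00280000 -- the lowest address not
 * below adr whose offset within the 1MB unit equals that of ref.
 */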
347
348 int dsp_mem_sync_inc(void)
349 {
350         if (dsp_mem_enable((void *)dspmem_base) < 0)
351                 return -1;
352         if (mem_sync.DARAM)
353                 mem_sync.DARAM->ad_arm++;
354         if (mem_sync.SARAM)
355                 mem_sync.SARAM->ad_arm++;
356         if (mem_sync.SDRAM)
357                 mem_sync.SDRAM->ad_arm++;
358         dsp_mem_disable((void *)dspmem_base);
359         return 0;
360 }
361
362 /*
363  * dsp_mem_sync_config() is called from mbox1 workqueue
364  */
365 int dsp_mem_sync_config(struct mem_sync_struct *sync)
366 {
367         size_t sync_seq_sz = sizeof(struct sync_seq);
368
369 #ifdef OLD_BINARY_SUPPORT
370         if (sync == NULL) {
371                 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
372                 return 0;
373         }
374 #endif
375         if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
376             (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
377             (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
378                 printk(KERN_ERR
379                        "omapdsp: mem_sync address validation failure!\n"
380                        "  mem_sync.DARAM = 0x%p,\n"
381                        "  mem_sync.SARAM = 0x%p,\n"
382                        "  mem_sync.SDRAM = 0x%p,\n",
383                        sync->DARAM, sync->SARAM, sync->SDRAM);
384                 return -1;
385         }
386         memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
387         return 0;
388 }
389
390 static mempool_t *kmem_pool_1M;
391 static mempool_t *kmem_pool_64K;
392
393 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
394 {
395         return (void *)__get_dma_pages(gfp, (unsigned int)order);
396 }
397
398 static void dsp_pool_free(void *buf, void *order)
399 {
400         free_pages((unsigned long)buf, (unsigned int)order);
401 }
402
403 static void dsp_kmem_release(void)
404 {
405         if (kmem_pool_64K) {
406                 mempool_destroy(kmem_pool_64K);
407                 kmem_pool_64K = NULL;
408         }
409
410         if (kmem_pool_1M) {
411                 mempool_destroy(kmem_pool_1M);
412                 kmem_pool_1M = NULL;
413         }
414 }
415
416 static int dsp_kmem_reserve(unsigned long size)
417 {
418         unsigned long len = size;
419
420         /* alignment check */
421         if (!is_aligned(size, SZ_64KB)) {
422                 printk(KERN_ERR
423                        "omapdsp: size(0x%lx) is not a multiple of 64KB.\n", size);
424                 return -EINVAL;
425         }
426
427         if (size > DSPSPACE_SIZE) {
428                 printk(KERN_ERR
429                        "omapdsp: size(0x%lx) is larger than DSP memory space "
430                        "size (0x%x).\n", size, DSPSPACE_SIZE);
431                 return -EINVAL;
432         }
433
434         if (size >= SZ_1MB) {
435                 int nr = size >> 20;
436
437                 if (likely(!kmem_pool_1M))
438                         kmem_pool_1M = mempool_create(nr,
439                                                       dsp_pool_alloc,
440                                                       dsp_pool_free,
441                                                       (void *)ORDER_1MB);
442                 else
443                         mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
444                                        GFP_KERNEL);
445
446                 size &= ~(0xf << 20);
447         }
448
449         if (size >= SZ_64KB) {
450                 int nr = size >> 16;
451
452                 if (likely(!kmem_pool_64K))
453                         kmem_pool_64K = mempool_create(nr,
454                                                        dsp_pool_alloc,
455                                                        dsp_pool_free,
456                                                        (void *)ORDER_64KB);
457                 else
458                         mempool_resize(kmem_pool_64K,
459                                        kmem_pool_64K->min_nr + nr, GFP_KERNEL);
460
461                 size &= ~(0xf << 16);
462         }
463
464         if (size)
465                 len -= size;
466
467         return len;
468 }
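/*
 * Worked example (illustrative): a reservation of size = 0x150000 passes
 * the 64KB alignment check, adds one element to kmem_pool_1M
 * (0x150000 >> 20 == 1) and five to kmem_pool_64K (0x50000 >> 16 == 5),
 * and returns 0x150000 since nothing is left over.
 */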
469
470 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
471 {
472         struct page *page, *ps, *pe;
473
474         ps = virt_to_page(buf);
475         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
476
477         for (page = ps; page < pe; page++)
478                 ClearPageReserved(page);
479
480         if ((order == ORDER_64KB) && likely(kmem_pool_64K))
481                 mempool_free((void *)buf, kmem_pool_64K);
482         else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
483                 mempool_free((void *)buf, kmem_pool_1M);
484         else
485                 free_pages(buf, order);
486 }
487
488 static inline void
489 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
490 {
491         pgd_t *pgd;
492         pud_t *pud;
493         pmd_t *pmd;
494         pte_t *pte;
495
496         pgd = pgd_offset_k(virt);
497         pud = pud_offset(pgd, virt);
498         pmd = pmd_offset(pud, virt);
499
500         if (pmd_none(*pmd)) {
501                 pte = pte_alloc_one_kernel(&init_mm, 0);
502                 if (!pte)
503                         return;
504
505                 /* note: two PMDs will be set  */
506                 pmd_populate_kernel(&init_mm, pmd, pte);
507         }
508
509         pte = pte_offset_kernel(pmd, virt);
510         set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
511 }
512
513 #if 0
514 static inline int
515 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
516 {
517         pgd_t *pgd;
518         pud_t *pud;
519         pmd_t *pmd;
520
521         pgd = pgd_offset_k(virt);
522         pud = pud_alloc(&init_mm, pgd, virt);
523         pmd = pmd_alloc(&init_mm, pud, virt);
524
525         if (virt & (1 << 20))
526                 pmd++;
527
528         if (!pmd_none(*pmd))
529                 /* No good, fall back on smaller mappings. */
530                 return -EINVAL;
531
532         *pmd = __pmd(phys | prot);
533         flush_pmd_entry(pmd);
534
535         return 0;
536 }
537 #endif
538
539 /*
540  * ARM MMU operations
541  */
542 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
543                             unsigned long size)
544 {
545         long off;
546         pgprot_t prot_pte;
547         int prot_sect;
548
549         printk(KERN_DEBUG
550                "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
551                virt, phys, size);
552
553         prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
554                             L_PTE_DIRTY | L_PTE_WRITE);
555
556         prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
557                     PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
558
559         if (cpu_architecture() <= CPU_ARCH_ARMv5)
560                 prot_sect |= PMD_BIT4;
561
562         off = phys - virt;
563
564         while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
565                 exmap_alloc_pte(virt, virt + off, prot_pte);
566
567                 virt += PAGE_SIZE;
568                 size -= PAGE_SIZE;
569         }
570
571         /* XXX: Not yet.. confuses dspfb -- PFM. */
572 #if 0
573         while (size >= (PGDIR_SIZE / 2)) {
574                 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
575                         break;
576
577                 virt += (PGDIR_SIZE / 2);
578                 size -= (PGDIR_SIZE / 2);
579         }
580 #endif
581
582         while (size >= PAGE_SIZE) {
583                 exmap_alloc_pte(virt, virt + off, prot_pte);
584
585                 virt += PAGE_SIZE;
586                 size -= PAGE_SIZE;
587         }
588
589         BUG_ON(size);
590
591         return 0;
592 }
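/*
 * Note (illustrative): since off = phys - virt, each exmap_alloc_pte(virt,
 * virt + off, ...) call above maps the kernel virtual page at 'virt' onto
 * 'phys' advanced by the same amount, one PAGE_SIZE step at a time.
 */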
593
594         /* XXX: T.Kobayashi
595          * A process can have old mappings. If we want to clear a pmd,
596          * we need to do it for all processes that use the old mapping.
597          */
598 #if 0
599 static inline void
600 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
601 {
602         pte_t *pte;
603
604         pte = pte_offset_map(pmd, addr);
605         do {
606                 if (pte_none(*pte))
607                         continue;
608
609                 pte_clear(&init_mm, addr, pte);
610         } while (pte++, addr += PAGE_SIZE, addr != end);
611
612         pte_unmap(pte - 1);
613 }
614
615 static inline void
616 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
617 {
618         pmd_t *pmd;
619         unsigned long next;
620
621         pmd = pmd_offset(pud, addr);
622         do {
623                 next = pmd_addr_end(addr, end);
624
625                 if (addr & (1 << 20))
626                         pmd++;
627
628                 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
629                         *pmd = __pmd(0);
630                         clean_pmd_entry(pmd);
631                         continue;
632                 }
633
634                 if (pmd_none_or_clear_bad(pmd))
635                         continue;
636
637                 exmap_clear_pte_range(pmd, addr, next);
638         } while (pmd++, addr = next, addr != end);
639 }
640
641 static inline void
642 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
643 {
644         pud_t *pud;
645         unsigned long next;
646
647         pud = pud_offset(pgd, addr);
648         do {
649                 next = pud_addr_end(addr, end);
650                 if (pud_none_or_clear_bad(pud))
651                         continue;
652
653                 exmap_clear_pmd_range(pud, addr, next);
654         } while (pud++, addr = next, addr != end);
655 }
656 #endif
657
658 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
659 {
660 #if 0
661         unsigned long next, end;
662         pgd_t *pgd;
663
664         printk(KERN_DEBUG
665                "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
666                virt, size);
667
668         pgd = pgd_offset_k(virt);
669         end = virt + size;
670         do {
671                 next = pgd_addr_end(virt, end);
672                 if (pgd_none_or_clear_bad(pgd))
673                         continue;
674
675                 exmap_clear_pud_range(pgd, virt, next);
676         } while (pgd++, virt = next, virt != end);
677 #else
678         pgd_t *pgd;
679         pud_t *pud;
680         pmd_t *pmd;
681         pte_t *pte;
682
683         printk(KERN_DEBUG
684                "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
685                virt, size);
686
687         while (size >= PAGE_SIZE) {
688                 pgd = pgd_offset_k(virt);
689                 pud = pud_offset(pgd, virt);
690                 pmd = pmd_offset(pud, virt);
691                 pte = pte_offset_kernel(pmd, virt);
692
693                 pte_clear(&init_mm, virt, pte);
694                 size -= PAGE_SIZE;
695                 virt += PAGE_SIZE;
696         }
697
698         BUG_ON(size);
699 #endif
700 }
701
702 static int exmap_valid(void *vadr, size_t len)
703 {
704         /* exmap_sem should be held before calling this function */
705         int i;
706
707 start:
708         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
709                 void *mapadr;
710                 unsigned long mapsize;
711                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
712
713                 if (!ent->valid)
714                         continue;
715                 mapadr = (void *)ent->vadr;
716                 mapsize = 1 << (ent->order + PAGE_SHIFT);
717                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
718                         if (vadr + len <= mapadr + mapsize) {
719                                 /* this map covers the whole range. */
720                                 return 1;
721                         } else {
722                                 /*
723                                  * this map covers the range partially;
724                                  * check the rest.
725                                  */
726                                 len -= mapadr + mapsize - vadr;
727                                 vadr = mapadr + mapsize;
728                                 goto start;
729                         }
730                 }
731         }
732
733         return 0;
734 }
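/*
 * Illustration: in exmap_valid() above, a range that starts inside one
 * mapping and runs on into an adjacent one is accepted, because the
 * unchecked remainder is re-examined against the whole table ("goto
 * start") until it is fully covered or no entry matches.
 */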
735
736 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
737 {
738         void *ds = (void *)daram_base;
739         void *de = (void *)daram_base + daram_size;
740         void *ss = (void *)saram_base;
741         void *se = (void *)saram_base + saram_size;
742         int ret;
743
744         if ((vadr >= ds) && (vadr < de)) {
745                 if (vadr + len > de)
746                         return MEM_TYPE_CROSSING;
747                 else
748                         return MEM_TYPE_DARAM;
749         } else if ((vadr >= ss) && (vadr < se)) {
750                 if (vadr + len > se)
751                         return MEM_TYPE_CROSSING;
752                 else
753                         return MEM_TYPE_SARAM;
754         } else {
755                 down_read(&exmap_sem);
756                 if (exmap_valid(vadr, len))
757                         ret = MEM_TYPE_EXTERN;
758                 else
759                         ret = MEM_TYPE_NONE;
760                 up_read(&exmap_sem);
761                 return ret;
762         }
763 }
764
765 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
766 {
767         if (dsp_mem_type(p, len) <= 0) {
768                 if (fmt != NULL) {
769                         char s[64];
770                         va_list args;
771
772                         va_start(args, fmt);
773                         vsprintf(s, fmt, args);
774                         va_end(args);
775                         printk(KERN_ERR
776                                "omapdsp: %s address(0x%p) and size(0x%x) are "
777                                "not valid!\n"
778                                "         (crossing different types of memories, or\n"
779                                "          external memory space where no "
780                                "actual memory is mapped)\n",
781                                s, p, len);
782                 }
783                 return -1;
784         }
785
786         return 0;
787 }
788
789 /*
790  * exmap_use(), unuse():
791  * when the mapped area is exported to user space with mmap,
792  * the usecount is incremented.
793  * while the usecount > 0, that area can't be released.
794  */
795 void exmap_use(void *vadr, size_t len)
796 {
797         int i;
798
799         down_write(&exmap_sem);
800         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
801                 void *mapadr;
802                 unsigned long mapsize;
803                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
804
805                 if (!ent->valid)
806                         continue;
807                 mapadr = (void *)ent->vadr;
808                 mapsize = 1 << (ent->order + PAGE_SHIFT);
809                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
810                         ent->usecount++;
811         }
812         up_write(&exmap_sem);
813 }
814
815 void exmap_unuse(void *vadr, size_t len)
816 {
817         int i;
818
819         down_write(&exmap_sem);
820         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
821                 void *mapadr;
822                 unsigned long mapsize;
823                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
824
825                 if (!ent->valid)
826                         continue;
827                 mapadr = (void *)ent->vadr;
828                 mapsize = 1 << (ent->order + PAGE_SHIFT);
829                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
830                         ent->usecount--;
831         }
832         up_write(&exmap_sem);
833 }
834
835 /*
836  * dsp_virt_to_phys()
837  * returns the physical address and sets *len to the valid length
838  */
839 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
840 {
841         int i;
842
843         if (is_dsp_internal_mem(vadr)) {
844                 /* DARAM or SARAM */
845                 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
846                 return (unsigned long)vadr;
847         }
848
849         /* EXRAM */
850         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
851                 void *mapadr;
852                 unsigned long mapsize;
853                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
854
855                 if (!ent->valid)
856                         continue;
857                 mapadr = (void *)ent->vadr;
858                 mapsize = 1 << (ent->order + PAGE_SHIFT);
859                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
860                         *len = mapadr + mapsize - vadr;
861                         return __pa(ent->buf) + vadr - mapadr;
862                 }
863         }
864
865         /* valid mapping not found */
866         return 0;
867 }
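/*
 * Worked example (hypothetical addresses): for an external-memory entry
 * with vadr = 0xe0200000, a 1MB window and buf at physical 0x11000000,
 * dsp_virt_to_phys((void *)0xe0234000, &len) returns 0x11034000 and sets
 * len to 0xcc000, the bytes remaining in that window.
 */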
868
869 /*
870  * DSP MMU operations
871  */
872 #ifdef CONFIG_ARCH_OMAP1
873 static dsp_mmu_reg_t get_cam_l_va_mask(dsp_mmu_reg_t pgsz)
874 {
875         switch (pgsz) {
876         case DSP_MMU_CAM_PAGESIZE_1MB:
877                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
878                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
879         case DSP_MMU_CAM_PAGESIZE_64KB:
880                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
881                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
882         case DSP_MMU_CAM_PAGESIZE_4KB:
883                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
884                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
885         case DSP_MMU_CAM_PAGESIZE_1KB:
886                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
887                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
888         }
889         return 0;
890 }
891 #endif /* CONFIG_ARCH_OMAP1 */
892
893 #if defined(CONFIG_ARCH_OMAP1)
894 #define get_cam_va_mask(pgsz) \
895         ((u32)DSP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
896          (u32)get_cam_l_va_mask(pgsz) << 6)
897 #elif defined(CONFIG_ARCH_OMAP2)
898 #define get_cam_va_mask(pgsz) \
899         ((pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
900          (pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
901          (pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
902          (pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)
903 #endif /* CONFIG_ARCH_OMAP2 */
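/*
 * Illustration: on OMAP2 a 64KB entry uses the mask 0xffff0000, so any
 * set bits of tlb_ent->va below the 64KB boundary make the alignment
 * check in dsp_mmu_load_tlb() below fail with -EINVAL.
 */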
904
905 static void get_tlb_lock(struct tlb_lock *tlb_lock)
906 {
907         dsp_mmu_reg_t lock = dsp_mmu_read_reg(DSP_MMU_LOCK);
908
909         tlb_lock->base = (lock & DSP_MMU_LOCK_BASE_MASK) >>
910                          DSP_MMU_LOCK_BASE_SHIFT;
911         tlb_lock->victim = (lock & DSP_MMU_LOCK_VICTIM_MASK) >>
912                            DSP_MMU_LOCK_VICTIM_SHIFT;
913 }
914
915 static void set_tlb_lock(struct tlb_lock *tlb_lock)
916 {
917         dsp_mmu_write_reg((tlb_lock->base   << DSP_MMU_LOCK_BASE_SHIFT) |
918                           (tlb_lock->victim << DSP_MMU_LOCK_VICTIM_SHIFT),
919                           DSP_MMU_LOCK);
920 }
921
922 static void __read_tlb(struct tlb_lock *tlb_lock, struct cam_ram_regset *cr)
923 {
924         /* set victim */
925         set_tlb_lock(tlb_lock);
926
927 #if defined(CONFIG_ARCH_OMAP1)
928         /* read a TLB entry */
929         dsp_mmu_write_reg(DSP_MMU_LD_TLB_RD, DSP_MMU_LD_TLB);
930
931         cr->cam_h = dsp_mmu_read_reg(DSP_MMU_READ_CAM_H);
932         cr->cam_l = dsp_mmu_read_reg(DSP_MMU_READ_CAM_L);
933         cr->ram_h = dsp_mmu_read_reg(DSP_MMU_READ_RAM_H);
934         cr->ram_l = dsp_mmu_read_reg(DSP_MMU_READ_RAM_L);
935 #elif defined(CONFIG_ARCH_OMAP2)
936         cr->cam = dsp_mmu_read_reg(DSP_MMU_READ_CAM);
937         cr->ram = dsp_mmu_read_reg(DSP_MMU_READ_RAM);
938 #endif
939 }
940
941 static void __load_tlb(struct cam_ram_regset *cr)
942 {
943 #if defined(CONFIG_ARCH_OMAP1)
944         dsp_mmu_write_reg(cr->cam_h, DSP_MMU_CAM_H);
945         dsp_mmu_write_reg(cr->cam_l, DSP_MMU_CAM_L);
946         dsp_mmu_write_reg(cr->ram_h, DSP_MMU_RAM_H);
947         dsp_mmu_write_reg(cr->ram_l, DSP_MMU_RAM_L);
948 #elif defined(CONFIG_ARCH_OMAP2)
949         dsp_mmu_write_reg(cr->cam | DSP_MMU_CAM_V, DSP_MMU_CAM);
950         dsp_mmu_write_reg(cr->ram, DSP_MMU_RAM);
951 #endif
952
953         /* flush the entry */
954         dsp_mmu_flush();
955
956         /* load a TLB entry */
957         dsp_mmu_write_reg(DSP_MMU_LD_TLB_LD, DSP_MMU_LD_TLB);
958 }
959
960 static int dsp_mmu_load_tlb(struct tlb_entry *tlb_ent)
961 {
962         struct tlb_lock tlb_lock;
963         struct cam_ram_regset cr;
964
965 #ifdef CONFIG_ARCH_OMAP1
966         clk_enable(dsp_ck_handle);
967         omap_dsp_request_mem();
968 #endif
969
970         get_tlb_lock(&tlb_lock);
971         for (tlb_lock.victim = 0;
972              tlb_lock.victim < tlb_lock.base;
973              tlb_lock.victim++) {
974                 struct cam_ram_regset tmp_cr;
975
976                 /* read a TLB entry */
977                 __read_tlb(&tlb_lock, &tmp_cr);
978                 if (!cam_ram_valid(tmp_cr))
979                         goto found_victim;
980         }
981         set_tlb_lock(&tlb_lock);
982
983 found_victim:
984         /* The last entry (index 31) cannot be locked? */
985         if (tlb_lock.victim == 31) {
986                 printk(KERN_ERR "omapdsp: TLB is full.\n");
987                 return -EBUSY;
988         }
989
990         if (tlb_ent->va & ~get_cam_va_mask(tlb_ent->pgsz)) {
991                 printk(KERN_ERR
992                        "omapdsp: mapping vadr (0x%06x) is not "
993                        "aligned to the page-size boundary\n", tlb_ent->va);
994                 return -EINVAL;
995         }
996
997 #if defined(CONFIG_ARCH_OMAP1)
998         cr.cam_h = tlb_ent->va >> 22;
999         cr.cam_l = (tlb_ent->va >> 6 & get_cam_l_va_mask(tlb_ent->pgsz)) |
1000                    tlb_ent->prsvd | tlb_ent->pgsz;
1001         cr.ram_h = tlb_ent->pa >> 16;
1002         cr.ram_l = (tlb_ent->pa & DSP_MMU_RAM_L_RAM_LSB_MASK) | tlb_ent->ap;
1003 #elif defined(CONFIG_ARCH_OMAP2)
1004         cr.cam = (tlb_ent->va & DSP_MMU_CAM_VATAG_MASK) |
1005                  tlb_ent->prsvd | tlb_ent->pgsz;
1006         cr.ram = tlb_ent->pa | tlb_ent->endian | tlb_ent->elsz;
1007 #endif
1008         __load_tlb(&cr);
1009
1010         /* update lock base */
1011         if (tlb_lock.victim == tlb_lock.base)
1012                 tlb_lock.base++;
1013         tlb_lock.victim = tlb_lock.base;
1014         set_tlb_lock(&tlb_lock);
1015
1016 #ifdef CONFIG_ARCH_OMAP1
1017         omap_dsp_release_mem();
1018         clk_disable(dsp_ck_handle);
1019 #endif
1020         return 0;
1021 }
1022
1023 static int dsp_mmu_clear_tlb(dsp_long_t vadr)
1024 {
1025         struct tlb_lock tlb_lock;
1026         int i;
1027         int max_valid = 0;
1028
1029 #ifdef CONFIG_ARCH_OMAP1
1030         clk_enable(dsp_ck_handle);
1031         omap_dsp_request_mem();
1032 #endif
1033
1034         get_tlb_lock(&tlb_lock);
1035         for (i = 0; i < tlb_lock.base; i++) {
1036                 struct cam_ram_regset cr;
1037                 dsp_long_t cam_va;
1038                 dsp_mmu_reg_t pgsz;
1039
1040                 /* read a TLB entry */
1041                 tlb_lock.victim = i;
1042                 __read_tlb(&tlb_lock, &cr);
1043                 if (!cam_ram_valid(cr))
1044                         continue;
1045
1046 #if defined(CONFIG_ARCH_OMAP1)
1047                 pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
1048                 cam_va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
1049                          (u32)(cr.cam_l & get_cam_l_va_mask(pgsz)) << 6;
1050 #elif defined(CONFIG_ARCH_OMAP2)
1051                 pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
1052                 cam_va = cr.cam & get_cam_va_mask(pgsz);
1053 #endif
1054
1055                 if (cam_va == vadr)
1056                         /* flush the entry */
1057                         dsp_mmu_flush();
1058                 else
1059                         max_valid = i;
1060         }
1061
1062         /* set new lock base */
1063         tlb_lock.base   = max_valid + 1;
1064         tlb_lock.victim = max_valid + 1;
1065         set_tlb_lock(&tlb_lock);
1066
1067 #ifdef CONFIG_ARCH_OMAP1
1068         omap_dsp_release_mem();
1069         clk_disable(dsp_ck_handle);
1070 #endif
1071         return 0;
1072 }
1073
1074 static void dsp_mmu_gflush(void)
1075 {
1076         struct tlb_lock tlb_lock;
1077
1078 #ifdef CONFIG_ARCH_OMAP1
1079         clk_enable(dsp_ck_handle);
1080         omap_dsp_request_mem();
1081 #endif
1082
1083         __dsp_mmu_gflush();
1084         tlb_lock.base   = exmap_preserved_cnt;
1085         tlb_lock.victim = exmap_preserved_cnt;
1086         set_tlb_lock(&tlb_lock);
1087
1088 #ifdef CONFIG_ARCH_OMAP1
1089         omap_dsp_release_mem();
1090         clk_disable(dsp_ck_handle);
1091 #endif
1092 }
1093
1094 /*
1095  * dsp_exmap()
1096  *
1097  * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
1098  * In this case, the buffer for DSP is allocated in this routine,
1099  * then it is mapped.
1100  * On the other hand, callers such as the frame buffer sharing code
1101  * call this function with padr set; in that case the known physical
1102  * address space pointed to by padr is shared with the DSP.
1103  */
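/*
 * Call sketch (illustrative): the MEM_IOCTL_EXMAP path maps freshly
 * allocated kernel memory with dsp_exmap(dspadr, 0, size, EXMAP_TYPE_MEM),
 * while dsp_fbexport() below shares an existing physical range with
 * dsp_exmap(dspadr, padr, fbsz, EXMAP_TYPE_FB).
 */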
1104 static int dsp_exmap(dsp_long_t dspadr, unsigned long padr, unsigned long size,
1105                      enum exmap_type_e type)
1106 {
1107         dsp_mmu_reg_t pgsz;
1108         void *buf;
1109         unsigned int order = 0;
1110         unsigned long unit;
1111         int prev = -1;
1112         dsp_long_t _dspadr = dspadr;
1113         unsigned long _padr = padr;
1114         void *_vadr = dspbyte_to_virt(dspadr);
1115         unsigned long _size = size;
1116         struct tlb_entry tlb_ent;
1117         struct exmap_tbl_entry *exmap_ent;
1118         int status;
1119         int idx;
1120         int i;
1121
1122 #define MINIMUM_PAGESZ  SZ_4KB
1123         /*
1124          * alignment check
1125          */
1126         if (!is_aligned(size, MINIMUM_PAGESZ)) {
1127                 printk(KERN_ERR
1128                        "omapdsp: size(0x%lx) is not a multiple of 4KB.\n", size);
1129                 return -EINVAL;
1130         }
1131         if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
1132                 printk(KERN_ERR
1133                        "omapdsp: DSP address(0x%x) is not aligned.\n", dspadr);
1134                 return -EINVAL;
1135         }
1136         if (!is_aligned(padr, MINIMUM_PAGESZ)) {
1137                 printk(KERN_ERR
1138                        "omapdsp: physical address(0x%lx) is not aligned.\n",
1139                        padr);
1140                 return -EINVAL;
1141         }
1142
1143         /* address validity check */
1144         if ((dspadr < dspmem_size) ||
1145             (dspadr >= DSPSPACE_SIZE) ||
1146             ((dspadr + size > DSP_INIT_PAGE) &&
1147              (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
1148                 printk(KERN_ERR
1149                        "omapdsp: illegal address/size for dsp_exmap().\n");
1150                 return -EINVAL;
1151         }
1152
1153         down_write(&exmap_sem);
1154
1155         /* overlap check */
1156         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1157                 unsigned long mapsize;
1158                 struct exmap_tbl_entry *tmp_ent = &exmap_tbl[i];
1159
1160                 if (!tmp_ent->valid)
1161                         continue;
1162                 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
1163                 if ((_vadr + size > tmp_ent->vadr) &&
1164                     (_vadr < tmp_ent->vadr + mapsize)) {
1165                         printk(KERN_ERR "omapdsp: exmap page overlap!\n");
1166                         up_write(&exmap_sem);
1167                         return -EINVAL;
1168                 }
1169         }
1170
1171 start:
1172         buf = NULL;
1173         /* Are there any free TLB lines?  */
1174         for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1175                 if (!exmap_tbl[idx].valid)
1176                         goto found_free;
1177         }
1178         printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
1179         status = -EBUSY;
1180         goto fail;
1181
1182 found_free:
1183         exmap_ent = &exmap_tbl[idx];
1184
1185         /*
1186          * we don't use
1187          * 1KB mapping in OMAP1,
1188          * 16MB mapping in OMAP2.
1189          */
1190         if ((_size >= SZ_1MB) &&
1191             (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
1192             is_aligned(_dspadr, SZ_1MB)) {
1193                 unit = SZ_1MB;
1194                 pgsz = DSP_MMU_CAM_PAGESIZE_1MB;
1195         } else if ((_size >= SZ_64KB) &&
1196                    (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
1197                    is_aligned(_dspadr, SZ_64KB)) {
1198                 unit = SZ_64KB;
1199                 pgsz = DSP_MMU_CAM_PAGESIZE_64KB;
1200         } else {
1201                 unit = SZ_4KB;
1202                 pgsz = DSP_MMU_CAM_PAGESIZE_4KB;
1203         }
1204
1205         order = get_order(unit);
1206
1207         /* buffer allocation */
1208         if (type == EXMAP_TYPE_MEM) {
1209                 struct page *page, *ps, *pe;
1210
1211                 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
1212                         buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
1213                 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
1214                         buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
1215                 else {
1216                         buf = (void *)__get_dma_pages(GFP_KERNEL, order);
1217                         if (buf == NULL) {
1218                                 status = -ENOMEM;
1219                                 goto fail;
1220                         }
1221                 }
1222
1223                 /* mark the pages as reserved; this is needed for mmap */
1224                 ps = virt_to_page(buf);
1225                 pe = virt_to_page(buf + unit);
1226
1227                 for (page = ps; page < pe; page++)
1228                         SetPageReserved(page);
1229
1230                 _padr = __pa(buf);
1231         }
1232
1233         /*
1234          * mapping for ARM MMU:
1235          * we should not access the allocated memory through 'buf'
1236          * since this area should not be cached.
1237          */
1238         status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
1239         if (status < 0)
1240                 goto fail;
1241
1242         /* loading DSP TLB entry */
1243         INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
1244         status = dsp_mmu_load_tlb(&tlb_ent);
1245         if (status < 0) {
1246                 exmap_clear_armmmu((unsigned long)_vadr, unit);
1247                 goto fail;
1248         }
1249
1250         INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
1251         exmap_ent->link.prev = prev;
1252         if (prev >= 0)
1253                 exmap_tbl[prev].link.next = idx;
1254
1255         if ((_size -= unit) == 0) {     /* normal completion */
1256                 up_write(&exmap_sem);
1257                 return size;
1258         }
1259
1260         _dspadr += unit;
1261         _vadr   += unit;
1262         _padr = padr ? _padr + unit : 0;
1263         prev = idx;
1264         goto start;
1265
1266 fail:
1267         up_write(&exmap_sem);
1268         if (buf)
1269                 dsp_mem_free_pages((unsigned long)buf, order);
1270         dsp_exunmap(dspadr);
1271         return status;
1272 }
1273
1274 static unsigned long unmap_free_arm(struct exmap_tbl_entry *ent)
1275 {
1276         unsigned long size;
1277
1278         /* clearing ARM MMU */
1279         size = 1 << (ent->order + PAGE_SHIFT);
1280         exmap_clear_armmmu((unsigned long)ent->vadr, size);
1281
1282         /* freeing allocated memory */
1283         if (ent->type == EXMAP_TYPE_MEM) {
1284                 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1285                 printk(KERN_DEBUG
1286                        "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1287                        size, ent->buf);
1288         }
1289 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1290         else if (ent->type == EXMAP_TYPE_FB) {
1291                 int status;
1292                 if (omapfb_nb) {
1293                         status = omapfb_unregister_client(omapfb_nb);
1294                         if (!status)
1295                                 printk("omapfb_unregister_client(): "
1296                                        "success\n");
1297                         else
1298                                 printk("omapfb_unregister_client(): "
1299                                        "failure(%d)\n", status);
1300                         kfree(omapfb_nb);
1301                         omapfb_nb = NULL;
1302                         omapfb_ready = 0;
1303                 }
1304         }
1305 #endif
1306
1307         return size;
1308 }
1309
1310 static int dsp_exunmap(dsp_long_t dspadr)
1311 {
1312         void *vadr;
1313         unsigned long size;
1314         int total = 0;
1315         struct exmap_tbl_entry *ent;
1316         int idx;
1317
1318         vadr = dspbyte_to_virt(dspadr);
1319         down_write(&exmap_sem);
1320         for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1321                 ent = &exmap_tbl[idx];
1322                 if ((!ent->valid) || ent->prsvd)
1323                         continue;
1324                 if (ent->vadr == vadr)
1325                         goto found_map;
1326         }
1327         up_write(&exmap_sem);
1328         printk(KERN_WARNING
1329                "omapdsp: address %06x not found in exmap_tbl.\n", dspadr);
1330         return -EINVAL;
1331
1332 found_map:
1333         if (ent->usecount > 0) {
1334                 printk(KERN_ERR
1335                        "omapdsp: exmap reference count is not 0.\n"
1336                        "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
1337                        idx, ent->vadr, ent->order, ent->usecount);
1338                 up_write(&exmap_sem);
1339                 return -EINVAL;
1340         }
1341         /* clearing DSP TLB entry */
1342         dsp_mmu_clear_tlb(dspadr);
1343
1344         /* clear ARM MMU and free buffer */
1345         size = unmap_free_arm(ent);
1346         ent->valid = 0;
1347         total += size;
1348
1349         /* we don't free PTEs */
1350
1351         /* flush TLB */
1352         flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
1353
1354         if ((idx = ent->link.next) < 0)
1355                 goto up_out;    /* normal completion */
1356         ent = &exmap_tbl[idx];
1357         dspadr += size;
1358         vadr   += size;
1359         if (ent->vadr == vadr)
1360                 goto found_map; /* continue */
1361
1362         printk(KERN_ERR
1363                "omapdsp: illegal exmap_tbl grouping!\n"
1364                "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1365                vadr, idx, ent->vadr);
1366         up_write(&exmap_sem);
1367         return -EINVAL;
1368
1369 up_out:
1370         up_write(&exmap_sem);
1371         return total;
1372 }
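/*
 * Note (illustrative): a region that dsp_exmap() had to split over several
 * TLB entries is chained through link.next, so a single dsp_exunmap() call
 * on its first DSP address tears down the whole group and returns the
 * total number of bytes released.
 */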
1373
1374 static void exmap_flush(void)
1375 {
1376         struct exmap_tbl_entry *ent;
1377         int i;
1378
1379         down_write(&exmap_sem);
1380
1381         /* clearing DSP TLB entry */
1382         dsp_mmu_gflush();
1383
1384         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1385                 ent = &exmap_tbl[i];
1386                 if (ent->valid && (!ent->prsvd)) {
1387                         unmap_free_arm(ent);
1388                         ent->valid = 0;
1389                 }
1390         }
1391
1392         /* flush TLB */
1393         flush_tlb_kernel_range(dspmem_base + dspmem_size,
1394                                dspmem_base + DSPSPACE_SIZE);
1395         up_write(&exmap_sem);
1396 }
1397
1398 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1399 #ifndef CONFIG_FB
1400 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1401 #endif /* CONFIG_FB */
1402
1403 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1404 static int omapfb_notifier_cb(struct notifier_block *omapfb_nb,
1405                               unsigned long event, void *fbi)
1406 {
1407         /* XXX */
1408         printk("omapfb_notifier_cb(): event = %s\n",
1409                (event == OMAPFB_EVENT_READY)    ? "READY" :
1410                (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1411         if (event == OMAPFB_EVENT_READY)
1412                 omapfb_ready = 1;
1413         else if (event == OMAPFB_EVENT_DISABLED)
1414                 omapfb_ready = 0;
1415         return 0;
1416 }
1417 #endif
1418
1419 static int dsp_fbexport(dsp_long_t *dspadr)
1420 {
1421         dsp_long_t dspadr_actual;
1422         unsigned long padr_sys, padr, fbsz_sys, fbsz;
1423         int cnt;
1424 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1425         int status;
1426 #endif
1427
1428         printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1429
1430 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1431         if (omapfb_nb) {
1432                 printk(KERN_WARNING
1433                        "omapdsp: frame buffer has been exported already!\n");
1434                 return -EBUSY;
1435         }
1436 #endif
1437
1438         if (num_registered_fb == 0) {
1439                 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1440                 return -EINVAL;
1441         }
1442         if (num_registered_fb != 1) {
1443                 printk(KERN_INFO
1444                        "omapdsp: %d frame buffers found. Using the first one.\n",
1445                        num_registered_fb);
1446         }
1447         padr_sys = registered_fb[0]->fix.smem_start;
1448         fbsz_sys = registered_fb[0]->fix.smem_len;
1449         if (fbsz_sys == 0) {
1450                 printk(KERN_ERR
1451                        "omapdsp: framebuffer doesn't seem to be configured "
1452                        "correctly! (size=0)\n");
1453                 return -EINVAL;
1454         }
1455
1456         /*
1457          * align padr and fbsz to a 4KB boundary
1458          * (the user is warned about this afterwards!)
1459          */
1460         padr = padr_sys & ~(SZ_4KB-1);
1461         fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
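        /*
         * Worked example (hypothetical values): padr_sys = 0x10020c00 and
         * fbsz_sys = 0x25800 yield padr = 0x10020000 and fbsz = 0x27000,
         * i.e. the exported window is widened so that the original frame
         * buffer stays fully covered.
         */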
1462
1463         /* line up dspadr offset with padr */
1464         dspadr_actual =
1465                 (fbsz > SZ_1MB) ?  lineup_offset(*dspadr, padr, SZ_1MB-1) :
1466                 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1467                 /* (fbsz > SZ_4KB) ? */ *dspadr;
1468         if (dspadr_actual != *dspadr)
1469                 printk(KERN_DEBUG
1470                        "omapdsp: actual dspadr for FBEXPORT = %08x\n",
1471                        dspadr_actual);
1472         *dspadr = dspadr_actual;
1473
1474         cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1475         if (cnt < 0) {
1476                 printk(KERN_ERR "omapdsp: exmap failure.\n");
1477                 return cnt;
1478         }
1479
1480         if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1481                 printk(KERN_WARNING
1482 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1483 "  !!  screen base address or size is not aligned to 4kB:           !!\n"
1484 "  !!    actual screen  adr = %08lx, size = %08lx             !!\n"
1485 "  !!    exporting      adr = %08lx, size = %08lx             !!\n"
1486 "  !!  Make sure that the framebuffer is allocated with 4kB-order!  !!\n"
1487 "  !!  Otherwise DSP can corrupt the kernel memory.                 !!\n"
1488 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1489                        padr_sys, fbsz_sys, padr, fbsz);
1490         }
1491
1492 #ifdef CONFIG_ARCH_OMAP1
1493         /* increase the DMA priority */
1494         set_emiff_dma_prio(15);
1495 #endif
1496
1497 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1498         omapfb_nb = kmalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1499         if (omapfb_nb == NULL) {
1500                 printk(KERN_ERR
1501                        "omapdsp: failed to allocate memory for omapfb_nb!\n");
1502                 dsp_exunmap(dspadr_actual);
1503                 return -ENOMEM;
1504         }
1505         status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1506         if (!status)
1507                 printk("omapfb_register_client(): success\n");
1508         else
1509                 printk("omapfb_register_client(): failure(%d)\n", status);
1510 #endif
1511
1512         return cnt;
1513 }
1514
1515 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1516
1517 static int dsp_fbexport(dsp_long_t *dspadr)
1518 {
1519         printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1520         return -EINVAL;
1521 }
1522
1523 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1524
1525 static void exmap_setup_preserved_mem_page(void *buf, dsp_long_t dspadr,
1526                                            int exmap_idx)
1527 {
1528         unsigned long phys;
1529         void *virt;
1530         struct tlb_entry tlb_ent;
1531
1532         phys = __pa(buf);
1533         virt = dspbyte_to_virt(dspadr);
1534         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1535         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], buf, virt);
1536         INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1537         dsp_mmu_load_tlb(&tlb_ent);
1538 }
1539
1540 static void exmap_clear_mem_page(dsp_long_t dspadr)
1541 {
1542         void *virt;
1543
1544         virt = dspbyte_to_virt(dspadr);
1545         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1546         /* DSP MMU is shutting down. not handled here. */
1547 }
1548
1549 #ifdef CONFIG_ARCH_OMAP2
1550 static void exmap_setup_iomap_page(unsigned long phys, unsigned long dsp_io_adr,
1551                                    int exmap_idx)
1552 {
1553         dsp_long_t dspadr;
1554         void *virt;
1555         struct tlb_entry tlb_ent;
1556
1557         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1558         virt = dspbyte_to_virt(dspadr);
1559         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1560         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], NULL, virt);
1561         INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
1562         dsp_mmu_load_tlb(&tlb_ent);
1563 }
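/*
 * Worked example (illustrative): with IOMAP_VAL = 0x3f, the PRCM page
 * registered below at dsp_io_adr = 0x7000 appears at DSP byte address
 * (0x3f << 18) + (0x7000 << 1) = 0xfce000.
 */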
1564
1565 static void exmap_clear_iomap_page(unsigned long dsp_io_adr)
1566 {
1567         dsp_long_t dspadr;
1568         void *virt;
1569
1570         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1571         virt = dspbyte_to_virt(dspadr);
1572         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1573         /* DSP MMU is shutting down. not handled here. */
1574 }
1575 #endif /* CONFIG_ARCH_OMAP2 */
1576
1577 #define OMAP2420_GPT5_BASE      (L4_24XX_BASE + 0x7c000)
1578 #define OMAP2420_GPT6_BASE      (L4_24XX_BASE + 0x7e000)
1579 #define OMAP2420_GPT7_BASE      (L4_24XX_BASE + 0x80000)
1580 #define OMAP2420_GPT8_BASE      (L4_24XX_BASE + 0x82000)
1581 #define OMAP24XX_EAC_BASE       (L4_24XX_BASE + 0x90000)
1582
1583 static int exmap_setup_preserved_entries(void)
1584 {
1585         int n = 0;
1586
1587         exmap_setup_preserved_mem_page(dspvect_page, DSP_INIT_PAGE, n++);
1588 #ifdef CONFIG_ARCH_OMAP2
1589         exmap_setup_iomap_page(OMAP24XX_PRCM_BASE,     0x7000, n++);
1590 #ifdef CONFIG_ARCH_OMAP2420
1591         exmap_setup_iomap_page(OMAP2420_GPT5_BASE,     0xe000, n++);
1592         exmap_setup_iomap_page(OMAP2420_GPT6_BASE,     0xe800, n++);
1593         exmap_setup_iomap_page(OMAP2420_GPT7_BASE,     0xf000, n++);
1594         exmap_setup_iomap_page(OMAP2420_GPT8_BASE,     0xf800, n++);
1595 #endif /* CONFIG_ARCH_OMAP2420 */
1596         exmap_setup_iomap_page(OMAP24XX_EAC_BASE,     0x10000, n++);
1597         exmap_setup_iomap_page(OMAP24XX_MAILBOX_BASE, 0x11000, n++);
1598 #endif /* CONFIG_ARCH_OMAP2 */
1599
1600         return n;
1601 }
1602
1603 static void exmap_clear_preserved_entries(void)
1604 {
1605         exmap_clear_mem_page(DSP_INIT_PAGE);
1606 #ifdef CONFIG_ARCH_OMAP2
1607         exmap_clear_iomap_page(0x7000);         /* PRCM */
1608 #ifdef CONFIG_ARCH_OMAP2420
1609         exmap_clear_iomap_page(0xe000);         /* GPT5 */
1610         exmap_clear_iomap_page(0xe800);         /* GPT6 */
1611         exmap_clear_iomap_page(0xf000);         /* GPT7 */
1612         exmap_clear_iomap_page(0xf800);         /* GPT8 */
1613 #endif /* CONFIG_ARCH_OMAP2420 */
1614         exmap_clear_iomap_page(0x10000);        /* EAC */
1615         exmap_clear_iomap_page(0x11000);        /* MAILBOX */
1616 #endif /* CONFIG_ARCH_OMAP2 */
1617 }
1618
1619 #ifdef CONFIG_ARCH_OMAP1
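/*
 * Acknowledge a pending DSP MMU fault interrupt (OMAP1): temporarily
 * map the faulting page, drop to the recovery runlevel, send the
 * interrupt acknowledge, then unmap the page and clear the error flag.
 */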
1620 static int dsp_mmu_itack(void)
1621 {
1622         unsigned long dspadr;
1623
1624         printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1625         if (!dsp_err_isset(ERRCODE_MMU)) {
1626                 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1627                 return -EINVAL;
1628         }
1629         dspadr = dsp_fault_adr & ~(SZ_4K-1);
1630         dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM);    /* FIXME: reserve TLB entry for this */
1631         printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1632         dsp_set_runlevel(RUNLEVEL_RECOVERY);
1633         __dsp_mmu_itack();
1634         udelay(100);
1635         dsp_exunmap(dspadr);
1636         dsp_err_clear(ERRCODE_MMU);
1637         return 0;
1638 }
1639 #endif /* CONFIG_ARCH_OMAP1 */
1640
1641 #ifdef CONFIG_ARCH_OMAP2
1642 #define MMU_IRQ_MASK \
1643         (DSP_MMU_IRQ_MULTIHITFAULT | \
1644          DSP_MMU_IRQ_TABLEWALKFAULT | \
1645          DSP_MMU_IRQ_EMUMISS | \
1646          DSP_MMU_IRQ_TRANSLATIONFAULT | \
1647          DSP_MMU_IRQ_TLBMISS)
1648 #endif
1649
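/*
 * (Re)initialize the DSP MMU: reset/disable it, enable it again, reset
 * the TLB lock pointers, reload the preserved entries and, on OMAP2,
 * unmask the MMU fault interrupts.  On OMAP1 the DSP clock and memory
 * interface are requested for the duration of the setup.
 */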
1650 static void dsp_mmu_init(void)
1651 {
1652         struct tlb_lock tlb_lock;
1653
1654 #ifdef CONFIG_ARCH_OMAP1
1655         clk_enable(dsp_ck_handle);
1656         omap_dsp_request_mem();
1657 #endif
1658         down_write(&exmap_sem);
1659
1660 #if defined(CONFIG_ARCH_OMAP1)
1661         dsp_mmu_disable();      /* clear all */
1662         udelay(100);
1663 #elif defined(CONFIG_ARCH_OMAP2)
1664         dsp_mmu_reset();
1665 #endif
1666         dsp_mmu_enable();
1667
1668         /* DSP TLB initialization */
1669         tlb_lock.base   = 0;
1670         tlb_lock.victim = 0;
1671         set_tlb_lock(&tlb_lock);
1672
1673         exmap_preserved_cnt = exmap_setup_preserved_entries();
1674
1675 #ifdef CONFIG_ARCH_OMAP2
1676         /* MMU IRQ mask setup */
1677         dsp_mmu_write_reg(MMU_IRQ_MASK, DSP_MMU_IRQENABLE);
1678 #endif
1679
1680         up_write(&exmap_sem);
1681 #ifdef CONFIG_ARCH_OMAP1
1682         omap_dsp_release_mem();
1683         clk_disable(dsp_ck_handle);
1684 #endif
1685 }
1686
1687 static void dsp_mmu_shutdown(void)
1688 {
1689         exmap_flush();
1690         exmap_clear_preserved_entries();
1691         dsp_mmu_disable();
1692 }
1693
1694 #ifdef CONFIG_ARCH_OMAP1
1695 /*
1696  * intmem_enable() / disable():
1697  * if the address is in the DSP internal memories,
1698  * we send PM mailbox commands so that the DSP DMA domain won't go idle
1699  * while the ARM is accessing those memories.
1700  */
1701 static int intmem_enable(void)
1702 {
1703         int ret = 0;
1704
1705         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1706                 ret = mbcompose_send(PM, PM_ENABLE, DSPREG_ICR_DMA);
1707
1708         return ret;
1709 }
1710
1711 static void intmem_disable(void)
{
1712         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1713                 mbcompose_send(PM, PM_DISABLE, DSPREG_ICR_DMA);
1714 }
1715 #endif /* CONFIG_ARCH_OMAP1 */
1716
1717 /*
1718  * dsp_mem_enable() / disable()
1719  */
1720 #ifdef CONFIG_ARCH_OMAP1
1721 int intmem_usecount;
1722 #endif
1723
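/*
 * For DSP internal memory, keep the memory interface active while it is
 * being accessed (refcounted via intmem_usecount on OMAP1).  For
 * external (exmap) memory, hold exmap_sem for reading so the mapping
 * cannot be torn down underneath us.
 */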
1724 int dsp_mem_enable(void *adr)
1725 {
1726         int ret = 0;
1727
1728         if (is_dsp_internal_mem(adr)) {
1729 #ifdef CONFIG_ARCH_OMAP1
1730                 if (intmem_usecount++ == 0)
1731                         ret = omap_dsp_request_mem();
1732 #endif
1733         } else
1734                 down_read(&exmap_sem);
1735
1736         return ret;
1737 }
1738
1739 void dsp_mem_disable(void *adr)
1740 {
1741         if (is_dsp_internal_mem(adr)) {
1742 #ifdef CONFIG_ARCH_OMAP1
1743                 if (--intmem_usecount == 0)
1744                         omap_dsp_release_mem();
1745 #endif
1746         } else
1747                 up_read(&exmap_sem);
1748 }
1749
1750 /* for safety */
1751 #ifdef CONFIG_ARCH_OMAP1
1752 void dsp_mem_usecount_clear(void)
1753 {
1754         if (intmem_usecount != 0) {
1755                 printk(KERN_WARNING
1756                        "omapdsp: unbalanced memory request/release detected.\n"
1757                        "         intmem_usecount should be zero here but "
1758                        "is not! ... forcing it to zero.\n");
1759                 intmem_usecount = 0;
1760                 omap_dsp_release_mem();
1761         }
1762 }
1763 #endif /* CONFIG_ARCH_OMAP1 */
1764
1765 /*
1766  * dsp_mem file operations
1767  */
1768 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1769 {
1770         loff_t ret;
1771
1772         mutex_lock(&file->f_dentry->d_inode->i_mutex);
1773         switch (orig) {
1774         case 0:
1775                 file->f_pos = offset;
1776                 ret = file->f_pos;
1777                 break;
1778         case 1:
1779                 file->f_pos += offset;
1780                 ret = file->f_pos;
1781                 break;
1782         default:
1783                 ret = -EINVAL;
1784         }
1785         mutex_unlock(&file->f_dentry->d_inode->i_mutex);
1786         return ret;
1787 }
1788
1789 static ssize_t intmem_read(struct file *file, char __user *buf, size_t count,
1790                            loff_t *ppos)
1791 {
1792         unsigned long p = *ppos;
1793         void *vadr = dspbyte_to_virt(p);
1794         ssize_t size = dspmem_size;
1795         ssize_t read;
1796
1797         if (p >= size)
1798                 return 0;
1799 #ifdef CONFIG_ARCH_OMAP1
1800         clk_enable(api_ck_handle);
1801 #endif
1802         read = count;
1803         if (count > size - p)
1804                 read = size - p;
1805         if (copy_to_user(buf, vadr, read)) {
1806                 read = -EFAULT;
1807                 goto out;
1808         }
1809         *ppos += read;
1810 out:
1811 #ifdef CONFIG_ARCH_OMAP1
1812         clk_disable(api_ck_handle);
1813 #endif
1814         return read;
1815 }
1816
1817 static ssize_t exmem_read(struct file *file, char __user *buf, size_t count,
1818                           loff_t *ppos)
1819 {
1820         unsigned long p = *ppos;
1821         void *vadr = dspbyte_to_virt(p);
1822
1823         if (!exmap_valid(vadr, count)) {
1824                 printk(KERN_ERR
1825                        "omapdsp: DSP address %08lx / size %08zx "
1826                        "is not valid!\n", p, count);
1827                 return -EFAULT;
1828         }
1829         if (count > DSPSPACE_SIZE - p)
1830                 count = DSPSPACE_SIZE - p;
1831         if (copy_to_user(buf, vadr, count))
1832                 return -EFAULT;
1833         *ppos += count;
1834
1835         return count;
1836 }
1837
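/*
 * Read dispatcher: internal DSP memory goes through intmem_read()
 * (with the API clock enabled on OMAP1), everything else through
 * exmem_read(), which checks the range against the exmap mappings.
 */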
1838 static ssize_t dsp_mem_read(struct file *file, char __user *buf, size_t count,
1839                             loff_t *ppos)
1840 {
1841         int ret;
1842         void *vadr = dspbyte_to_virt((unsigned long)*ppos);
1843
1844         if (dsp_mem_enable(vadr) < 0)
1845                 return -EBUSY;
1846         if (is_dspbyte_internal_mem(*ppos))
1847                 ret = intmem_read(file, buf, count, ppos);
1848         else
1849                 ret = exmem_read(file, buf, count, ppos);
1850         dsp_mem_disable(vadr);
1851
1852         return ret;
1853 }
1854
1855 static ssize_t intmem_write(struct file *file, const char __user *buf,
1856                             size_t count, loff_t *ppos)
1857 {
1858         unsigned long p = *ppos;
1859         void *vadr = dspbyte_to_virt(p);
1860         ssize_t size = dspmem_size;
1861         ssize_t written;
1862
1863         if (p >= size)
1864                 return 0;
1865 #ifdef CONFIG_ARCH_OMAP1
1866         clk_enable(api_ck_handle);
1867 #endif
1868         written = count;
1869         if (count > size - p)
1870                 written = size - p;
1871         if (copy_from_user(vadr, buf, written)) {
1872                 written = -EFAULT;
1873                 goto out;
1874         }
1875         *ppos += written;
1876 out:
1877 #ifdef CONFIG_ARCH_OMAP1
1878         clk_disable(api_ck_handle);
1879 #endif
1880         return written;
1881 }
1882
1883 static ssize_t exmem_write(struct file *file, const char __user *buf,
1884                            size_t count, loff_t *ppos)
1885 {
1886         unsigned long p = *ppos;
1887         void *vadr = dspbyte_to_virt(p);
1888
1889         if (!exmap_valid(vadr, count)) {
1890                 printk(KERN_ERR
1891                        "omapdsp: DSP address %08lx / size %08zx "
1892                        "is not valid!\n", p, count);
1893                 return -EFAULT;
1894         }
1895         if (count > DSPSPACE_SIZE - p)
1896                 count = DSPSPACE_SIZE - p;
1897         if (copy_from_user(vadr, buf, count))
1898                 return -EFAULT;
1899         *ppos += count;
1900
1901         return count;
1902 }
1903
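/*
 * Write dispatcher, symmetric to dsp_mem_read().
 */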
1904 static ssize_t dsp_mem_write(struct file *file, const char __user *buf,
1905                              size_t count, loff_t *ppos)
1906 {
1907         int ret;
1908         void *vadr = dspbyte_to_virt((unsigned long)*ppos);
1909
1910         if (dsp_mem_enable(vadr) < 0)
1911                 return -EBUSY;
1912         if (is_dspbyte_internal_mem(*ppos))
1913                 ret = intmem_write(file, buf, count, ppos);
1914         else
1915                 ret = exmem_write(file, buf, count, ppos);
1916         dsp_mem_disable(vadr);
1917
1918         return ret;
1919 }
1920
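/*
 * ioctl interface: MMU (re)init, mapping/unmapping of external memory,
 * flushing all non-preserved mappings, frame buffer export, kernel
 * memory pool reserve/release and (OMAP1) MMU interrupt acknowledge.
 *
 * A user-space caller would do something like the following for EXMAP
 * (sketch only; the request codes and struct layout come from ioctl.h):
 *
 *	struct omap_dsp_mapinfo mi = { .dspadr = adr, .size = len };
 *	ioctl(fd, MEM_IOCTL_EXMAP, &mi);
 */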
1921 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1922                          unsigned int cmd, unsigned long arg)
1923 {
1924         switch (cmd) {
1925         case MEM_IOCTL_MMUINIT:
1926                 dsp_mmu_init();
1927                 return 0;
1928
1929         case MEM_IOCTL_EXMAP:
1930                 {
1931                         struct omap_dsp_mapinfo mapinfo;
1932                         if (copy_from_user(&mapinfo, (void __user *)arg,
1933                                            sizeof(mapinfo)))
1934                                 return -EFAULT;
1935                         return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1936                                          EXMAP_TYPE_MEM);
1937                 }
1938
1939         case MEM_IOCTL_EXUNMAP:
1940                 return dsp_exunmap((unsigned long)arg);
1941
1942         case MEM_IOCTL_EXMAP_FLUSH:
1943                 exmap_flush();
1944                 return 0;
1945
1946         case MEM_IOCTL_FBEXPORT:
1947                 {
1948                         dsp_long_t dspadr;
1949                         int ret;
1950                         if (copy_from_user(&dspadr, (void __user *)arg,
1951                                            sizeof(dsp_long_t)))
1952                                 return -EFAULT;
1953                         ret = dsp_fbexport(&dspadr);
1954                         if (copy_to_user((void __user *)arg, &dspadr,
1955                                          sizeof(dsp_long_t)))
1956                                 return -EFAULT;
1957                         return ret;
1958                 }
1959
1960 #ifdef CONFIG_ARCH_OMAP1
1961         case MEM_IOCTL_MMUITACK:
1962                 return dsp_mmu_itack();
1963 #endif
1964
1965         case MEM_IOCTL_KMEM_RESERVE:
1966                 {
1967                         __u32 size;
1968                         if (copy_from_user(&size, (void __user *)arg,
1969                                            sizeof(__u32)))
1970                                 return -EFAULT;
1971                         return dsp_kmem_reserve(size);
1972                 }
1973
1974         case MEM_IOCTL_KMEM_RELEASE:
1975                 dsp_kmem_release();
1976                 return 0;
1977
1978         default:
1979                 return -ENOIOCTLCMD;
1980         }
1981 }
1982
1983 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
1984 {
1985         /*
1986          * FIXME
1987          */
1988         return -ENOSYS;
1989 }
1990
1991 static int dsp_mem_open(struct inode *inode, struct file *file)
1992 {
1993         if (!capable(CAP_SYS_RAWIO))
1994                 return -EPERM;
1995
1996         return 0;
1997 }
1998
1999 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
2000 /*
2001  * fb update functions:
2002  * fbupd_response() is executed by the workqueue.
2003  * fbupd_cb() is called when fb update is done, in interrupt context.
2004  * mbox_fbupd() is called when KFUNC:FBCTL:UPD is received from DSP.
2005  */
2006 static void fbupd_response(void *arg)
2007 {
2008         int status;
2009
2010         status = mbcompose_send(KFUNC, KFUNC_FBCTL, FBCTL_UPD);
2011         if (status < 0) {
2012                 /* FIXME: DSP is busy !! */
2013                 printk(KERN_ERR
2014                        "omapdsp: DSP is busy when trying to send FBCTL:UPD "
2015                        "response!\n");
2016         }
2017 }
2018
2019 static DECLARE_WORK(fbupd_response_work, (void (*)(void *))fbupd_response,
2020                     NULL);
2021
2022 static void fbupd_cb(void *arg)
2023 {
2024         schedule_work(&fbupd_response_work);
2025 }
2026
2027 void mbox_fbctl_upd(void)
2028 {
2029         struct omapfb_update_window win;
2030         volatile unsigned short *buf = ipbuf_sys_da->d;
2031
2032         /* FIXME: try count sometimes exceeds 1000. */
2033         if (sync_with_dsp(&ipbuf_sys_da->s, TID_ANON, 5000) < 0) {
2034                 printk(KERN_ERR "mbox: FBCTL:UPD - IPBUF sync failed!\n");
2035                 return;
2036         }
2037         win.x = buf[0];
2038         win.y = buf[1];
2039         win.width = buf[2];
2040         win.height = buf[3];
2041         win.format = buf[4];
2042         release_ipbuf_pvt(ipbuf_sys_da);
2043
2044         if (!omapfb_ready) {
2045                 printk(KERN_WARNING
2046                        "omapdsp: fbupd() called while HWA742 is not ready!\n");
2047                 return;
2048         }
2050         omapfb_update_window_async(registered_fb[1], &win, fbupd_cb, NULL);
2051 }
2052
2053 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
2054
2055 void mbox_fbctl_upd(void)
2056 {
2057 }
2058 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
2059
2060 /*
2061  * sysfs files
2062  */
2063
2064 /* mmu */
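/*
 * Dump the DSP MMU TLB contents: each entry is read back by pointing
 * the victim register at it, and the original lock state is restored
 * afterwards.
 */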
2065 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
2066                         char *buf)
2067 {
2068         int len;
2069         struct tlb_lock tlb_lock_org;
2070         int i;
2071
2072 #ifdef CONFIG_ARCH_OMAP1
2073         clk_enable(dsp_ck_handle);
2074         omap_dsp_request_mem();
2075 #endif
2076         down_read(&exmap_sem);
2077
2078         get_tlb_lock(&tlb_lock_org);
2079
2080 #if defined(CONFIG_ARCH_OMAP1)
2081         len = sprintf(buf, "P: preserved, V: valid\n"
2082                            "ety P V size   cam_va     ram_pa ap\n");
2083                          /* 00: P V  4KB 0x300000 0x10171800 FA */
2084 #elif defined(CONFIG_ARCH_OMAP2)
2085         len = sprintf(buf, "P: preserved, V: valid\n"
2086                            "B: big endian, L: little endian, "
2087                            "M: mixed page attribute\n"
2088                            "ety P V size   cam_va     ram_pa E ES M\n");
2089                          /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
2090 #endif
2091
2092         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2093                 struct cam_ram_regset cr;
2094                 struct tlb_lock tlb_lock_tmp;
2095                 struct tlb_entry ent;
2096 #if defined(CONFIG_ARCH_OMAP1)
2097                 char *pgsz_str, *ap_str;
2098 #elif defined(CONFIG_ARCH_OMAP2)
2099                 char *pgsz_str, *elsz_str;
2100 #endif
2101
2102                 /* read a TLB entry */
2103                 tlb_lock_tmp.base   = tlb_lock_org.base;
2104                 tlb_lock_tmp.victim = i;
2105                 __read_tlb(&tlb_lock_tmp, &cr);
2106
2107 #if defined(CONFIG_ARCH_OMAP1)
2108                 ent.pgsz  = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
2109                 ent.prsvd = cr.cam_l & DSP_MMU_CAM_P;
2110                 ent.valid = cr.cam_l & DSP_MMU_CAM_V;
2111                 ent.ap    = cr.ram_l & DSP_MMU_RAM_L_AP_MASK;
2112                 ent.va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
2113                          (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
2114                 ent.pa = (unsigned long)cr.ram_h << 16 |
2115                          (cr.ram_l & DSP_MMU_RAM_L_RAM_LSB_MASK);
2116
2117                 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
2118                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2119                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
2120                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB":
2121                                                                      " ???";
2122                 ap_str = (ent.ap == DSP_MMU_RAM_L_AP_RO) ? "RO":
2123                          (ent.ap == DSP_MMU_RAM_L_AP_FA) ? "FA":
2124                          (ent.ap == DSP_MMU_RAM_L_AP_NA) ? "NA":
2125                                                            "??";
2126 #elif defined(CONFIG_ARCH_OMAP2)
2127                 ent.pgsz   = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
2128                 ent.prsvd  = cr.cam & DSP_MMU_CAM_P;
2129                 ent.valid  = cr.cam & DSP_MMU_CAM_V;
2130                 ent.va     = cr.cam & DSP_MMU_CAM_VATAG_MASK;
2131                 ent.endian = cr.ram & DSP_MMU_RAM_ENDIANNESS;
2132                 ent.elsz   = cr.ram & DSP_MMU_RAM_ELEMENTSIZE_MASK;
2133                 ent.pa     = cr.ram & DSP_MMU_RAM_PADDR_MASK;
2134                 ent.mixed  = cr.ram & DSP_MMU_RAM_MIXED;
2135
2136                 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
2137                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
2138                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2139                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
2140                                                                      " ???";
2141                 elsz_str = (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
2142                            (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_16) ? "16":
2143                            (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_32) ? "32":
2144                                                                       "??";
2145 #endif
2146
2147                 if (i == tlb_lock_org.base)
2148                         len += sprintf(buf + len, "lock base = %d\n",
2149                                        tlb_lock_org.base);
2150                 if (i == tlb_lock_org.victim)
2151                         len += sprintf(buf + len, "victim    = %d\n",
2152                                        tlb_lock_org.victim);
2153 #if defined(CONFIG_ARCH_OMAP1)
2154                 len += sprintf(buf + len,
2155                                /* 00: P V  4KB 0x300000 0x10171800 FA */
2156                                "%02d: %c %c %s 0x%06x 0x%08lx %s\n",
2157                                i,
2158                                ent.prsvd ? 'P' : ' ',
2159                                ent.valid ? 'V' : ' ',
2160                                pgsz_str, ent.va, ent.pa, ap_str);
2161 #elif defined(CONFIG_ARCH_OMAP2)
2162                 len += sprintf(buf + len,
2163                                /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
2164                                "%02d: %c %c %s 0x%06x 0x%08lx %c %s %c\n",
2165                                i,
2166                                ent.prsvd ? 'P' : ' ',
2167                                ent.valid ? 'V' : ' ',
2168                                pgsz_str, ent.va, ent.pa,
2169                                ent.endian ? 'B' : 'L',
2170                                elsz_str,
2171                                ent.mixed ? 'M' : ' ');
2172 #endif /* CONFIG_ARCH_OMAP2 */
2173         }
2174
2175         /* restore victim entry */
2176         set_tlb_lock(&tlb_lock_org);
2177
2178         up_read(&exmap_sem);
2179 #ifdef CONFIG_ARCH_OMAP1
2180         omap_dsp_release_mem();
2181         clk_disable(dsp_ck_handle);
2182 #endif
2183         return len;
2184 }
2185
2186 /* exmap */
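/*
 * Dump the external mappings: for each chain in exmap_tbl[] print the
 * DSP address and total size, followed by the individual buffers.
 */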
2187 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
2188                           char *buf)
2189 {
2190         int len;
2191         int i;
2192
2193         down_read(&exmap_sem);
2194         len = sprintf(buf, "  dspadr     size         buf     size uc\n");
2195                          /* 0x300000 0x123000  0xc0171000 0x100000  0*/
2196         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2197                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
2198                 void *vadr;
2199                 unsigned long size;
2200                 enum exmap_type_e type;
2201                 int idx;
2202
2203                 /* find a top of link */
2204                 if (!ent->valid || (ent->link.prev >= 0))
2205                         continue;
2206
2207                 vadr = ent->vadr;
2208                 type = ent->type;
2209                 size = 0;
2210                 idx = i;
2211                 do {
2212                         ent = &exmap_tbl[idx];
2213                         size += PAGE_SIZE << ent->order;
2214                 } while ((idx = ent->link.next) >= 0);
2215
2216                 len += sprintf(buf + len, "0x%06x %#8lx",
2217                                virt_to_dspbyte(vadr), size);
2218
2219                 if (type == EXMAP_TYPE_FB) {
2220                         len += sprintf(buf + len, "    framebuf\n");
2221                 } else {
2222                         len += sprintf(buf + len, "\n");
2223                         idx = i;
2224                         do {
2225                                 ent = &exmap_tbl[idx];
2226                                 len += sprintf(buf + len,
2227                                                /* 0xc0171000 0x100000  0*/
2228                                                "%19s0x%8p %#8lx %2d\n",
2229                                                "", ent->buf,
2230                                                PAGE_SIZE << ent->order,
2231                                                ent->usecount);
2232                         } while ((idx = ent->link.next) >= 0);
2233                 }
2234         }
2235
2236         up_read(&exmap_sem);
2237         return len;
2238 }
2239
2240 /* mempool */
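/*
 * Report the reserved kernel memory pools: total reserved size plus the
 * number of 1MB and 64KB buffers (and how many of them are free).
 */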
2241 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
2242                             char *buf)
2243 {
2244         int min_nr_1M = 0, curr_nr_1M = 0;
2245         int min_nr_64K = 0, curr_nr_64K = 0;
2246         int total = 0;
2247
2248         if (likely(kmem_pool_1M)) {
2249                 min_nr_1M  = kmem_pool_1M->min_nr;
2250                 curr_nr_1M = kmem_pool_1M->curr_nr;
2251                 total += min_nr_1M * SZ_1MB;
2252         }
2253         if (likely(kmem_pool_64K)) {
2254                 min_nr_64K  = kmem_pool_64K->min_nr;
2255                 curr_nr_64K = kmem_pool_64K->curr_nr;
2256                 total += min_nr_64K * SZ_64KB;
2257         }
2258
2259         return sprintf(buf,
2260                        "0x%x\n"
2261                        "1M  buffer: %d (%d free)\n"
2262                        "64K buffer: %d (%d free)\n",
2263                        total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
2264 }
2265
2266 /*
2267  * workqueue for mmu int
2268  */
2269 #ifdef CONFIG_ARCH_OMAP1
2270 /*
2271  * MMU fault mask:
2272  * We ignore prefetch err.
2273  */
2274 #define MMUFAULT_MASK \
2275         (DSP_MMU_FAULT_ST_PERM |\
2276          DSP_MMU_FAULT_ST_TLB_MISS |\
2277          DSP_MMU_FAULT_ST_TRANS)
2278 #endif /* CONFIG_ARCH_OMAP1 */
2279
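/*
 * Bottom half of the DSP MMU fault interrupt: read and report the fault
 * status and address, then either record the error (if the DSP
 * configuration is complete) or reset the DSP, and finally re-enable
 * the interrupt.
 */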
2280 static void do_mmu_int(void)
2281 {
2282 #if defined(CONFIG_ARCH_OMAP1)
2283
2284         dsp_mmu_reg_t status;
2285         dsp_mmu_reg_t adh, adl;
2286         dsp_mmu_reg_t dp;
2287
2288         status = dsp_mmu_read_reg(DSP_MMU_FAULT_ST);
2289         adh = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_H);
2290         adl = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_L);
2291         dp = adh & DSP_MMU_FAULT_AD_H_DP;
2292         dsp_fault_adr = MK32(adh & DSP_MMU_FAULT_AD_H_ADR_MASK, adl);
2293
2294         /* if the fault is masked, nothing to do */
2295         if ((status & MMUFAULT_MASK) == 0) {
2296                 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
2297                 /*
2298                  * Note: on OMAP1710,
2299                  * when the CACHE + DMA domain in the DSP comes out of idle,
2300                  * an MMU interrupt occurs but DSP_MMU_FAULT_ST is not set.
2301                  * In this case we just ignore the interrupt.
2302                  */
2303                 if (status) {
2304                         printk(KERN_DEBUG "%s%s%s%s\n",
2305                                (status & DSP_MMU_FAULT_ST_PREF)?
2306                                         "  (prefetch err)" : "",
2307                                (status & DSP_MMU_FAULT_ST_PERM)?
2308                                         "  (permission fault)" : "",
2309                                (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2310                                         "  (TLB miss)" : "",
2311                                (status & DSP_MMU_FAULT_ST_TRANS) ?
2312                                         "  (translation fault)": "");
2313                         printk(KERN_DEBUG "fault address = %#08x\n",
2314                                dsp_fault_adr);
2315                 }
2316                 enable_irq(INT_DSP_MMU);
2317                 return;
2318         }
2319
2320 #elif defined(CONFIG_ARCH_OMAP2)
2321
2322         dsp_mmu_reg_t status;
2323
2324         status = dsp_mmu_read_reg(DSP_MMU_IRQSTATUS);
2325         dsp_fault_adr = dsp_mmu_read_reg(DSP_MMU_FAULT_AD);
2326
2327 #endif /* CONFIG_ARCH_OMAP2 */
2328
2329         printk(KERN_INFO "DSP MMU interrupt!\n");
2330
2331 #if defined(CONFIG_ARCH_OMAP1)
2332
2333         printk(KERN_INFO "%s%s%s%s\n",
2334                (status & DSP_MMU_FAULT_ST_PREF)?
2335                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PREF)?
2336                                 "  prefetch err":
2337                                 "  (prefetch err)":
2338                                 "",
2339                (status & DSP_MMU_FAULT_ST_PERM)?
2340                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PERM)?
2341                                 "  permission fault":
2342                                 "  (permission fault)":
2343                                 "",
2344                (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2345                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TLB_MISS)?
2346                                 "  TLB miss":
2347                                 "  (TLB miss)":
2348                                 "",
2349                (status & DSP_MMU_FAULT_ST_TRANS)?
2350                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TRANS)?
2351                                 "  translation fault":
2352                                 "  (translation fault)":
2353                                 "");
2354
2355 #elif defined(CONFIG_ARCH_OMAP2)
2356
2357         printk(KERN_INFO "%s%s%s%s%s\n",
2358                (status & DSP_MMU_IRQ_MULTIHITFAULT)?
2359                         (MMU_IRQ_MASK & DSP_MMU_IRQ_MULTIHITFAULT)?
2360                                 "  multi hit":
2361                                 "  (multi hit)":
2362                                 "",
2363                (status & DSP_MMU_IRQ_TABLEWALKFAULT)?
2364                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TABLEWALKFAULT)?
2365                                 "  table walk fault":
2366                                 "  (table walk fault)":
2367                                 "",
2368                (status & DSP_MMU_IRQ_EMUMISS)?
2369                         (MMU_IRQ_MASK & DSP_MMU_IRQ_EMUMISS)?
2370                                 "  EMU miss":
2371                                 "  (EMU miss)":
2372                                 "",
2373                (status & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2374                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2375                                 "  translation fault":
2376                                 "  (translation fault)":
2377                                 "",
2378                (status & DSP_MMU_IRQ_TLBMISS)?
2379                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TLBMISS)?
2380                                 "  TLB miss":
2381                                 "  (TLB miss)":
2382                                 "");
2383
2384 #endif /* CONFIG_ARCH_OMAP2 */
2385
2386         printk(KERN_INFO "fault address = %#08x\n", dsp_fault_adr);
2387
2388         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
2389                 dsp_err_set(ERRCODE_MMU, (unsigned long)dsp_fault_adr);
2390         else {
2391 #ifdef CONFIG_ARCH_OMAP1
2392                 __dsp_mmu_itack();
2393 #endif
2394                 printk(KERN_INFO "Resetting DSP...\n");
2395                 dsp_cpustat_request(CPUSTAT_RESET);
2396                 /*
2397                  * If we enable the following, the semaphore lock should be avoided.
2398                  *
2399                 printk(KERN_INFO "Flushing DSP MMU...\n");
2400                 exmap_flush();
2401                 dsp_mmu_init();
2402                  */
2403         }
2404
2405 #ifdef CONFIG_ARCH_OMAP2
2406         dsp_mmu_disable();
2407         dsp_mmu_write_reg(status, DSP_MMU_IRQSTATUS);
2408         dsp_mmu_enable();
2409 #endif
2410
2411         enable_irq(INT_DSP_MMU);
2412 }
2413
2414 static DECLARE_WORK(mmu_int_work, (void (*)(void *))do_mmu_int, NULL);
2415
2416 /*
2417  * DSP MMU interrupt handler
2418  */
2419
2420 static irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id,
2421                                      struct pt_regs *regs)
2422 {
2423         disable_irq(INT_DSP_MMU);
2424         schedule_work(&mmu_int_work);
2425         return IRQ_HANDLED;
2426 }
2427
2428 /*
2429  * file operations for the DSP memory device
2430  */
2431 struct file_operations dsp_mem_fops = {
2432         .owner   = THIS_MODULE,
2433         .llseek  = dsp_mem_lseek,
2434         .read    = dsp_mem_read,
2435         .write   = dsp_mem_write,
2436         .ioctl   = dsp_mem_ioctl,
2437         .mmap    = dsp_mem_mmap,
2438         .open    = dsp_mem_open,
2439 };
2440
2441 void dsp_mem_start(void)
2442 {
2443 #ifdef CONFIG_ARCH_OMAP1
2444         dsp_register_mem_cb(intmem_enable, intmem_disable);
2445 #endif
2446 }
2447
2448 void dsp_mem_stop(void)
2449 {
2450         memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
2451 #ifdef CONFIG_ARCH_OMAP1
2452         dsp_unregister_mem_cb();
2453 #endif
2454 }
2455
2456 static char devid_mmu;
2457
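/*
 * Module init: program the IPI element-size entries (OMAP2), clear the
 * exmap table, allocate the DSP vector page, initialize the MMU and
 * register the (initially disabled) MMU fault interrupt and the sysfs
 * files.
 */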
2458 int __init dsp_mem_init(void)
2459 {
2460         int i;
2461         int ret = 0;
2462 #ifdef CONFIG_ARCH_OMAP2
2463         int dspmem_pg_count;
2464
2465         dspmem_pg_count = dspmem_size >> 12;
2466         for (i = 0; i < dspmem_pg_count; i++) {
2467                 dsp_ipi_write_reg(i, DSP_IPI_INDEX);
2468                 dsp_ipi_write_reg(DSP_IPI_ENTRY_ELMSIZEVALUE_16, DSP_IPI_ENTRY);
2469         }
2470         dsp_ipi_write_reg(1, DSP_IPI_ENABLE);
2471
2472         dsp_ipi_write_reg(IOMAP_VAL, DSP_IPI_IOMAP);
2473 #endif
2474
2475         for (i = 0; i < DSP_MMU_TLB_LINES; i++)
2476                 exmap_tbl[i].valid = 0;
2477
2478         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
2479         if (dspvect_page == NULL) {
2480                 printk(KERN_ERR
2481                        "omapdsp: failed to allocate memory "
2482                        "for dsp vector table\n");
2483                 return -ENOMEM;
2484         }
2485         dsp_mmu_init();
2486 #ifdef CONFIG_ARCH_OMAP1
2487         dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);
2488 #endif
2489
2490         /*
2491          * DSP MMU interrupt setup
2492          */
2493         ret = request_irq(INT_DSP_MMU, dsp_mmu_interrupt, SA_INTERRUPT, "dsp",
2494                           &devid_mmu);
2495         if (ret) {
2496                 printk(KERN_ERR
2497                        "omapdsp: failed to register DSP MMU interrupt: %d\n", ret);
2498                 goto fail;
2499         }
2500
2501         /* MMU interrupt is not enabled until DSP runs */
2502         disable_irq(INT_DSP_MMU);
2503
2504         device_create_file(&dsp_device.dev, &dev_attr_mmu);
2505         device_create_file(&dsp_device.dev, &dev_attr_exmap);
2506         device_create_file(&dsp_device.dev, &dev_attr_mempool);
2507
2508         return 0;
2509
2510 fail:
2511 #ifdef CONFIG_ARCH_OMAP1
2512         dsp_reset_idle_boot_base();
2513 #endif
2514         dsp_mmu_shutdown();
2515         free_page((unsigned long)dspvect_page);
2516         dspvect_page = NULL;
2517         return ret;
2518 }
2519
2520 void dsp_mem_exit(void)
2521 {
2522         free_irq(INT_DSP_MMU, &devid_mmu);
2523
2524         /* recover disable_depth */
2525         enable_irq(INT_DSP_MMU);
2526
2527 #ifdef CONFIG_ARCH_OMAP1
2528         dsp_reset_idle_boot_base();
2529 #endif
2530         dsp_mmu_shutdown();
2531         dsp_kmem_release();
2532
2533         if (dspvect_page != NULL) {
2534                 free_page((unsigned long)dspvect_page);
2535                 dspvect_page = NULL;
2536         }
2537
2538         device_remove_file(&dsp_device.dev, &dev_attr_mmu);
2539         device_remove_file(&dsp_device.dev, &dev_attr_exmap);
2540         device_remove_file(&dsp_device.dev, &dev_attr_mempool);
2541 }