1 /*
2  * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
3  *
4  * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
5  *
6  * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
7  *
8  * Conversion to mempool API and ARM MMU section mapping
9  * by Paul Mundt <paul.mundt@nokia.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * version 2 as published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23  * 02110-1301 USA
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/fs.h>
30 #include <linux/fb.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/mempool.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <asm/uaccess.h>
37 #include <asm/io.h>
38 #include <asm/irq.h>
39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h>
41 #include <asm/arch/tc.h>
42 #include <asm/arch/omapfb.h>
43 #include <asm/arch/mailbox.h>
44 #include <asm/arch/dsp_common.h>
45 #include "uaccess_dsp.h"
46 #include "dsp_mbcmd.h"
47 #include "../mailbox_hw.h"
48 #include "dsp.h"
49 #include "ioctl.h"
50 #include "ipbuf.h"
51
52 #ifdef CONFIG_ARCH_OMAP2
53 #define IOMAP_VAL       0x3f
54 #endif
55
56 #define SZ_1KB  0x400
57 #define SZ_4KB  0x1000
58 #define SZ_64KB 0x10000
59 #define SZ_1MB  0x100000
60 #define SZ_16MB 0x1000000
61 #define is_aligned(adr,align)   (!((adr)&((align)-1)))
62 #define ORDER_4KB       (12 - PAGE_SHIFT)
63 #define ORDER_64KB      (16 - PAGE_SHIFT)
64 #define ORDER_1MB       (20 - PAGE_SHIFT)
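/*
 * ORDER_4KB/64KB/1MB convert a mapping unit size into a page allocation
 * order for __get_dma_pages()/free_pages(); e.g. with 4KB pages
 * (PAGE_SHIFT = 12), ORDER_64KB = 4 and ORDER_1MB = 8.
 */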
65
66 /*
67  * absorb DSP MMU register size and location difference
68  */
69 #if defined(CONFIG_ARCH_OMAP1)
70 typedef u16 dsp_mmu_reg_t;
71 #define dsp_mmu_read_reg(a)     omap_readw(a)
72 #define dsp_mmu_write_reg(v,a)  omap_writew(v,a)
73 #elif defined(CONFIG_ARCH_OMAP2)
74 typedef u32 dsp_mmu_reg_t;
75 #define dsp_mmu_read_reg(a)     readl(a)
76 #define dsp_mmu_write_reg(v,a)  writel(v,a)
77 #define dsp_ipi_read_reg(a)     readl(a)
78 #define dsp_ipi_write_reg(v,a)  writel(v,a)
79 #endif
80
81 #if defined(CONFIG_ARCH_OMAP1)
82
83 #define dsp_mmu_enable() \
84         do { \
85                 dsp_mmu_write_reg(DSP_MMU_CNTL_MMU_EN | DSP_MMU_CNTL_RESET_SW, \
86                                   DSP_MMU_CNTL); \
87         } while(0)
88 #define dsp_mmu_disable() \
89         do { \
90                 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
91         } while(0)
92 #define __dsp_mmu_itack() \
93         do { \
94                 dsp_mmu_write_reg(DSP_MMU_IT_ACK_IT_ACK, DSP_MMU_IT_ACK); \
95         } while(0)
96
97 #elif defined(CONFIG_ARCH_OMAP2)
98
99 #define dsp_mmu_enable() \
100         do { \
101                 dsp_mmu_write_reg(DSP_MMU_CNTL_MMUENABLE, DSP_MMU_CNTL); \
102         } while(0)
103 #define dsp_mmu_disable() \
104         do { \
105                 dsp_mmu_write_reg(0, DSP_MMU_CNTL); \
106         } while(0)
107 #define dsp_mmu_reset() \
108         do { \
109                 dsp_mmu_write_reg(dsp_mmu_read_reg(DSP_MMU_SYSCONFIG) | \
110                                   DSP_MMU_SYSCONFIG_SOFTRESET, \
111                                   DSP_MMU_SYSCONFIG); \
112         } while(0)
113
114 #endif /* CONFIG_ARCH_OMAP2 */
115
116 #define dsp_mmu_flush() \
117         do { \
118                 dsp_mmu_write_reg(DSP_MMU_FLUSH_ENTRY_FLUSH_ENTRY, \
119                                   DSP_MMU_FLUSH_ENTRY); \
120         } while(0)
121 #define __dsp_mmu_gflush() \
122         do { \
123                 dsp_mmu_write_reg(DSP_MMU_GFLUSH_GFLUSH, DSP_MMU_GFLUSH); \
124         } while(0)
125
126 /*
127  * absorb register name difference
128  */
129 #ifdef CONFIG_ARCH_OMAP1
130 #define DSP_MMU_CAM_P                   DSP_MMU_CAM_L_P
131 #define DSP_MMU_CAM_V                   DSP_MMU_CAM_L_V
132 #define DSP_MMU_CAM_PAGESIZE_MASK       DSP_MMU_CAM_L_PAGESIZE_MASK
133 #define DSP_MMU_CAM_PAGESIZE_1MB        DSP_MMU_CAM_L_PAGESIZE_1MB
134 #define DSP_MMU_CAM_PAGESIZE_64KB       DSP_MMU_CAM_L_PAGESIZE_64KB
135 #define DSP_MMU_CAM_PAGESIZE_4KB        DSP_MMU_CAM_L_PAGESIZE_4KB
136 #define DSP_MMU_CAM_PAGESIZE_1KB        DSP_MMU_CAM_L_PAGESIZE_1KB
137 #endif /* CONFIG_ARCH_OMAP1 */
138
139 /*
140  * OMAP1 EMIFF access
141  */
142 #ifdef CONFIG_ARCH_OMAP1
143 #define EMIF_PRIO_LB_MASK       0x0000f000
144 #define EMIF_PRIO_LB_SHIFT      12
145 #define EMIF_PRIO_DMA_MASK      0x00000f00
146 #define EMIF_PRIO_DMA_SHIFT     8
147 #define EMIF_PRIO_DSP_MASK      0x00000070
148 #define EMIF_PRIO_DSP_SHIFT     4
149 #define EMIF_PRIO_MPU_MASK      0x00000007
150 #define EMIF_PRIO_MPU_SHIFT     0
151 #define set_emiff_dma_prio(prio) \
152         do { \
153                 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
154                              ~EMIF_PRIO_DMA_MASK) | \
155                             ((prio) << EMIF_PRIO_DMA_SHIFT), \
156                             OMAP_TC_OCPT1_PRIOR); \
157         } while(0)
158 #endif /* CONFIG_ARCH_OMAP1 */
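/*
 * set_emiff_dma_prio() does a read-modify-write of OMAP_TC_OCPT1_PRIOR,
 * replacing only the DMA priority field (bits 11:8) and leaving the
 * LB/DSP/MPU fields untouched.  dsp_fbexport() uses it to raise the DMA
 * priority to 15 when the frame buffer is exported to the DSP.
 */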
159
160 enum exmap_type_e {
161         EXMAP_TYPE_MEM,
162         EXMAP_TYPE_FB
163 };
164
165 struct exmap_tbl_entry {
166         unsigned int valid:1;
167         unsigned int prsvd:1;   /* preserved */
168         int usecount;           /* reference count by mmap */
169         enum exmap_type_e type;
170         void *buf;              /* virtual address of the buffer,
171                                  * i.e. 0xc0000000 - */
172         void *vadr;             /* DSP shadow space,
173                                  * i.e. 0xe0000000 - 0xe0ffffff */
174         unsigned int order;
175         struct {
176                 int prev;
177                 int next;
178         } link;                 /* grouping */
179 };
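/*
 * One exmap_tbl_entry exists per DSP MMU TLB line (DSP_MMU_TLB_LINES = 32).
 * A single dsp_exmap() call may consume several lines (1MB/64KB/4KB units);
 * those entries are chained through link.prev/link.next so that
 * dsp_exunmap() can tear the whole group down again.
 */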
180
181 #define INIT_EXMAP_TBL_ENTRY(ent,b,v,typ,od) \
182         do {\
183                 (ent)->buf       = (b); \
184                 (ent)->vadr      = (v); \
185                 (ent)->valid     = 1; \
186                 (ent)->prsvd     = 0; \
187                 (ent)->usecount  = 0; \
188                 (ent)->type      = (typ); \
189                 (ent)->order     = (od); \
190                 (ent)->link.next = -1; \
191                 (ent)->link.prev = -1; \
192         } while (0)
193
194 #define INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(ent,b,v) \
195         do {\
196                 (ent)->buf       = (b); \
197                 (ent)->vadr      = (v); \
198                 (ent)->valid     = 1; \
199                 (ent)->prsvd     = 1; \
200                 (ent)->usecount  = 0; \
201                 (ent)->type      = EXMAP_TYPE_MEM; \
202                 (ent)->order     = 0; \
203                 (ent)->link.next = -1; \
204                 (ent)->link.prev = -1; \
205         } while (0)
206
207 #define DSP_MMU_TLB_LINES       32
208 static struct exmap_tbl_entry exmap_tbl[DSP_MMU_TLB_LINES];
209 static int exmap_preserved_cnt;
210 static DECLARE_RWSEM(exmap_sem);
211
212 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
213 static struct omapfb_notifier_block *omapfb_nb;
214 static int omapfb_ready;
215 #endif
216
217 struct cam_ram_regset {
218 #if defined(CONFIG_ARCH_OMAP1)
219         dsp_mmu_reg_t cam_h;
220         dsp_mmu_reg_t cam_l;
221         dsp_mmu_reg_t ram_h;
222         dsp_mmu_reg_t ram_l;
223 #elif defined(CONFIG_ARCH_OMAP2)
224         dsp_mmu_reg_t cam;
225         dsp_mmu_reg_t ram;
226 #endif
227 };
228
229 struct tlb_entry {
230         dsp_long_t va;
231         unsigned long pa;
232         dsp_mmu_reg_t pgsz, prsvd, valid;
233 #if defined(CONFIG_ARCH_OMAP1)
234         dsp_mmu_reg_t ap;
235 #elif defined(CONFIG_ARCH_OMAP2)
236         dsp_mmu_reg_t endian, elsz, mixed;
237 #endif
238 };
239
240 #if defined(CONFIG_ARCH_OMAP1)
241 #define INIT_TLB_ENTRY(ent,v,p,ps) \
242         do { \
243                 (ent)->va = (v); \
244                 (ent)->pa = (p); \
245                 (ent)->pgsz = (ps); \
246                 (ent)->prsvd = 0; \
247                 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
248         } while (0)
249 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
250         do { \
251                 (ent)->va = (v); \
252                 (ent)->pa = (p); \
253                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
254                 (ent)->prsvd = DSP_MMU_CAM_P; \
255                 (ent)->ap = DSP_MMU_RAM_L_AP_FA; \
256         } while (0)
257 #elif defined(CONFIG_ARCH_OMAP2)
258 #define INIT_TLB_ENTRY(ent,v,p,ps) \
259         do { \
260                 (ent)->va = (v); \
261                 (ent)->pa = (p); \
262                 (ent)->pgsz = (ps); \
263                 (ent)->prsvd = 0; \
264                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
265                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
266                 (ent)->mixed = 0; \
267         } while (0)
268 #define INIT_TLB_ENTRY_4KB_PRESERVED(ent,v,p) \
269         do { \
270                 (ent)->va = (v); \
271                 (ent)->pa = (p); \
272                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
273                 (ent)->prsvd = DSP_MMU_CAM_P; \
274                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
275                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_16; \
276                 (ent)->mixed = 0; \
277         } while (0)
278 #define INIT_TLB_ENTRY_4KB_ES32_PRESERVED(ent,v,p) \
279         do { \
280                 (ent)->va = (v); \
281                 (ent)->pa = (p); \
282                 (ent)->pgsz = DSP_MMU_CAM_PAGESIZE_4KB; \
283                 (ent)->prsvd = DSP_MMU_CAM_P; \
284                 (ent)->endian = DSP_MMU_RAM_ENDIANNESS_LITTLE; \
285                 (ent)->elsz = DSP_MMU_RAM_ELEMENTSIZE_32; \
286                 (ent)->mixed = 0; \
287         } while (0)
288 #endif
289
290 #if defined(CONFIG_ARCH_OMAP1)
291 #define cam_ram_valid(cr)       ((cr).cam_l & DSP_MMU_CAM_V)
292 #elif defined(CONFIG_ARCH_OMAP2)
293 #define cam_ram_valid(cr)       ((cr).cam & DSP_MMU_CAM_V)
294 #endif
295
296 struct tlb_lock {
297         int base;
298         int victim;
299 };
300
301 static int dsp_exunmap(dsp_long_t dspadr);
302
303 static void *dspvect_page;
304 static u32 dsp_fault_adr;
305 static struct mem_sync_struct mem_sync;
306
307 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
308                         char *buf);
309 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
310                           char *buf);
311 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
312                             char *buf);
313
314 static struct device_attribute dev_attr_mmu =     __ATTR_RO(mmu);
315 static struct device_attribute dev_attr_exmap =   __ATTR_RO(exmap);
316 static struct device_attribute dev_attr_mempool = __ATTR_RO(mempool);
317
318 /*
319  * special mempool function:
320  * hope this goes to mm/mempool.c
321  */
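/*
 * mempool_alloc() normally tries the underlying allocator first and falls
 * back to the reserved elements only under memory pressure; this helper
 * does the reverse, handing out a reserved (already allocated, physically
 * contiguous) element whenever one is left, and only then calling
 * mempool_alloc().
 */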
322 static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
323 {
324         unsigned long flags;
325
326         spin_lock_irqsave(&pool->lock, flags);
327         if (likely(pool->curr_nr)) {
328                 void *element = pool->elements[--pool->curr_nr];
329                 spin_unlock_irqrestore(&pool->lock, flags);
330                 return element;
331         }
332         spin_unlock_irqrestore(&pool->lock, flags);
333
334         return mempool_alloc(pool, gfp_mask);
335 }
336
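/*
 * lineup_offset(): return the lowest address >= adr whose offset within
 * 'mask' equals that of 'ref'.  For example, with mask = SZ_1MB - 1,
 * lineup_offset(0x00300000, 0x044ff000, 0xfffff) = 0x003ff000.
 * dsp_fbexport() uses this to give the DSP-side address the same offset
 * within the mapping unit as the frame buffer's physical address.
 */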
337 static __inline__ unsigned long lineup_offset(unsigned long adr,
338                                               unsigned long ref,
339                                               unsigned long mask)
340 {
341         unsigned long newadr;
342
343         newadr = (adr & ~mask) | (ref & mask);
344         if (newadr < adr)
345                 newadr += mask + 1;
346         return newadr;
347 }
348
349 int dsp_mem_sync_inc(void)
350 {
351         if (dsp_mem_enable((void *)dspmem_base) < 0)
352                 return -1;
353         if (mem_sync.DARAM)
354                 mem_sync.DARAM->ad_arm++;
355         if (mem_sync.SARAM)
356                 mem_sync.SARAM->ad_arm++;
357         if (mem_sync.SDRAM)
358                 mem_sync.SDRAM->ad_arm++;
359         dsp_mem_disable((void *)dspmem_base);
360         return 0;
361 }
362
363 /*
364  * dsp_mem_sync_config() is called from mbox1 workqueue
365  */
366 int dsp_mem_sync_config(struct mem_sync_struct *sync)
367 {
368         size_t sync_seq_sz = sizeof(struct sync_seq);
369
370 #ifdef OLD_BINARY_SUPPORT
371         if (sync == NULL) {
372                 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
373                 return 0;
374         }
375 #endif
376         if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
377             (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
378             (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
379                 printk(KERN_ERR
380                        "omapdsp: mem_sync address validation failure!\n"
381                        "  mem_sync.DARAM = 0x%p,\n"
382                        "  mem_sync.SARAM = 0x%p,\n"
383                        "  mem_sync.SDRAM = 0x%p,\n",
384                        sync->DARAM, sync->SARAM, sync->SDRAM);
385                 return -1;
386         }
387         memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
388         return 0;
389 }
390
391 static mempool_t *kmem_pool_1M;
392 static mempool_t *kmem_pool_64K;
393
394 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
395 {
396         return (void *)__get_dma_pages(gfp, (unsigned int)order);
397 }
398
399 static void dsp_pool_free(void *buf, void *order)
400 {
401         free_pages((unsigned long)buf, (unsigned int)order);
402 }
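/*
 * kmem_pool_1M/kmem_pool_64K are created below with the page order cast
 * into the pool_data pointer, so dsp_pool_alloc()/dsp_pool_free() simply
 * cast it back to an order for __get_dma_pages()/free_pages().
 */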
403
404 static void dsp_kmem_release(void)
405 {
406         if (kmem_pool_64K) {
407                 mempool_destroy(kmem_pool_64K);
408                 kmem_pool_64K = NULL;
409         }
410
411         if (kmem_pool_1M) {
412                 mempool_destroy(kmem_pool_1M);
413                 kmem_pool_1M = NULL;
414         }
415 }
416
417 static int dsp_kmem_reserve(unsigned long size)
418 {
419         unsigned long len = size;
420
421         /* alignment check */
422         if (!is_aligned(size, SZ_64KB)) {
423                 printk(KERN_ERR
424                        "omapdsp: size(0x%lx) is not a multiple of 64KB.\n", size);
425                 return -EINVAL;
426         }
427
428         if (size > DSPSPACE_SIZE) {
429                 printk(KERN_ERR
430                        "omapdsp: size(0x%lx) is larger than DSP memory space "
431                        "size (0x%x).\n", size, DSPSPACE_SIZE);
432                 return -EINVAL;
433         }
434
435         if (size >= SZ_1MB) {
436                 int nr = size >> 20;
437
438                 if (likely(!kmem_pool_1M))
439                         kmem_pool_1M = mempool_create(nr,
440                                                       dsp_pool_alloc,
441                                                       dsp_pool_free,
442                                                       (void *)ORDER_1MB);
443                 else
444                         mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
445                                        GFP_KERNEL);
446
447                 size &= ~(0xf << 20);
448         }
449
450         if (size >= SZ_64KB) {
451                 int nr = size >> 16;
452
453                 if (likely(!kmem_pool_64K))
454                         kmem_pool_64K = mempool_create(nr,
455                                                        dsp_pool_alloc,
456                                                        dsp_pool_free,
457                                                        (void *)ORDER_64KB);
458                 else
459                         mempool_resize(kmem_pool_64K,
460                                        kmem_pool_64K->min_nr + nr, GFP_KERNEL);
461
462                 size &= ~(0xf << 16);
463         }
464
465         if (size)
466                 len -= size;
467
468         return len;
469 }
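/*
 * Example: dsp_kmem_reserve(0x130000) adds one 1MB element to kmem_pool_1M
 * and three 64KB elements to kmem_pool_64K, then returns 0x130000 since the
 * whole request could be reserved.  Requests that are not a multiple of
 * 64KB are rejected with -EINVAL.
 */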
470
471 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
472 {
473         struct page *page, *ps, *pe;
474
475         ps = virt_to_page(buf);
476         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
477
478         for (page = ps; page < pe; page++)
479                 ClearPageReserved(page);
480
481         if ((order == ORDER_64KB) && likely(kmem_pool_64K))
482                 mempool_free((void *)buf, kmem_pool_64K);
483         else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
484                 mempool_free((void *)buf, kmem_pool_1M);
485         else
486                 free_pages(buf, order);
487 }
488
489 static inline void
490 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
491 {
492         pgd_t *pgd;
493         pud_t *pud;
494         pmd_t *pmd;
495         pte_t *pte;
496
497         pgd = pgd_offset_k(virt);
498         pud = pud_offset(pgd, virt);
499         pmd = pmd_offset(pud, virt);
500
501         if (pmd_none(*pmd)) {
502                 pte = pte_alloc_one_kernel(&init_mm, 0);
503                 if (!pte)
504                         return;
505
506                 /* note: two PMDs will be set  */
507                 pmd_populate_kernel(&init_mm, pmd, pte);
508         }
509
510         pte = pte_offset_kernel(pmd, virt);
511         set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
512 }
513
514 #if 0
515 static inline int
516 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
517 {
518         pgd_t *pgd;
519         pud_t *pud;
520         pmd_t *pmd;
521
522         pgd = pgd_offset_k(virt);
523         pud = pud_alloc(&init_mm, pgd, virt);
524         pmd = pmd_alloc(&init_mm, pud, virt);
525
526         if (virt & (1 << 20))
527                 pmd++;
528
529         if (!pmd_none(*pmd))
530                 /* No good, fall back on smaller mappings. */
531                 return -EINVAL;
532
533         *pmd = __pmd(phys | prot);
534         flush_pmd_entry(pmd);
535
536         return 0;
537 }
538 #endif
539
540 /*
541  * ARM MMU operations
542  */
543 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
544                             unsigned long size)
545 {
546         long off;
547         pgprot_t prot_pte;
548         int prot_sect;
549
550         printk(KERN_DEBUG
551                "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
552                virt, phys, size);
553
554         prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
555                             L_PTE_DIRTY | L_PTE_WRITE);
556
557         prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
558                     PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
559
560         if (cpu_architecture() <= CPU_ARCH_ARMv5)
561                 prot_sect |= PMD_BIT4;
562
563         off = phys - virt;
564
565         while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
566                 exmap_alloc_pte(virt, virt + off, prot_pte);
567
568                 virt += PAGE_SIZE;
569                 size -= PAGE_SIZE;
570         }
571
572         /* XXX: Not yet.. confuses dspfb -- PFM. */
573 #if 0
574         while (size >= (PGDIR_SIZE / 2)) {
575                 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
576                         break;
577
578                 virt += (PGDIR_SIZE / 2);
579                 size -= (PGDIR_SIZE / 2);
580         }
581 #endif
582
583         while (size >= PAGE_SIZE) {
584                 exmap_alloc_pte(virt, virt + off, prot_pte);
585
586                 virt += PAGE_SIZE;
587                 size -= PAGE_SIZE;
588         }
589
590         BUG_ON(size);
591
592         return 0;
593 }
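/*
 * exmap_set_armmmu() currently maps everything with 4KB PTEs: the first
 * loop page-maps until both the virtual and the physical address reach a
 * 1MB boundary, and the second loop maps the remainder.  The section
 * mapping fast path (exmap_alloc_sect) is compiled out because it still
 * confuses dspfb.
 */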
594
595         /* XXX: T.Kobayashi
596          * A process can have old mappings. If we want to clear a pmd,
597          * we need to do it for all processes that use the old mapping.
598          */
599 #if 0
600 static inline void
601 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
602 {
603         pte_t *pte;
604
605         pte = pte_offset_map(pmd, addr);
606         do {
607                 if (pte_none(*pte))
608                         continue;
609
610                 pte_clear(&init_mm, addr, pte);
611         } while (pte++, addr += PAGE_SIZE, addr != end);
612
613         pte_unmap(pte - 1);
614 }
615
616 static inline void
617 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
618 {
619         pmd_t *pmd;
620         unsigned long next;
621
622         pmd = pmd_offset(pud, addr);
623         do {
624                 next = pmd_addr_end(addr, end);
625
626                 if (addr & (1 << 20))
627                         pmd++;
628
629                 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
630                         *pmd = __pmd(0);
631                         clean_pmd_entry(pmd);
632                         continue;
633                 }
634
635                 if (pmd_none_or_clear_bad(pmd))
636                         continue;
637
638                 exmap_clear_pte_range(pmd, addr, next);
639         } while (pmd++, addr = next, addr != end);
640 }
641
642 static inline void
643 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
644 {
645         pud_t *pud;
646         unsigned long next;
647
648         pud = pud_offset(pgd, addr);
649         do {
650                 next = pud_addr_end(addr, end);
651                 if (pud_none_or_clear_bad(pud))
652                         continue;
653
654                 exmap_clear_pmd_range(pud, addr, next);
655         } while (pud++, addr = next, addr != end);
656 }
657 #endif
658
659 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
660 {
661 #if 0
662         unsigned long next, end;
663         pgd_t *pgd;
664
665         printk(KERN_DEBUG
666                "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
667                virt, size);
668
669         pgd = pgd_offset_k(virt);
670         end = virt + size;
671         do {
672                 next = pgd_addr_end(virt, end);
673                 if (pgd_none_or_clear_bad(pgd))
674                         continue;
675
676                 exmap_clear_pud_range(pgd, virt, next);
677         } while (pgd++, virt = next, virt != end);
678 #else
679         pgd_t *pgd;
680         pud_t *pud;
681         pmd_t *pmd;
682         pte_t *pte;
683
684         printk(KERN_DEBUG
685                "omapdsp: unmapping in ARM MMU, v=%#010lx, sz=%#lx\n",
686                virt, size);
687
688         while (size >= PAGE_SIZE) {
689                 pgd = pgd_offset_k(virt);
690                 pud = pud_offset(pgd, virt);
691                 pmd = pmd_offset(pud, virt);
692                 pte = pte_offset_kernel(pmd, virt);
693
694                 pte_clear(&init_mm, virt, pte);
695                 size -= PAGE_SIZE;
696                 virt += PAGE_SIZE;
697         }
698
699         BUG_ON(size);
700 #endif
701 }
702
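/*
 * exmap_valid() succeeds only if [vadr, vadr+len) is completely covered by
 * valid exmap_tbl entries; when an entry covers just the beginning of the
 * range, the scan restarts for the remaining portion.
 */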
703 static int exmap_valid(void *vadr, size_t len)
704 {
705         /* exmap_sem should be held before calling this function */
706         int i;
707
708 start:
709         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
710                 void *mapadr;
711                 unsigned long mapsize;
712                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
713
714                 if (!ent->valid)
715                         continue;
716                 mapadr = (void *)ent->vadr;
717                 mapsize = 1 << (ent->order + PAGE_SHIFT);
718                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
719                         if (vadr + len <= mapadr + mapsize) {
720                                 /* this map covers the whole range. */
721                                 return 1;
722                         } else {
723                                 /*
724                                  * this map covers the range only partially;
725                                  * check the remaining portion.
726                                  */
727                                 len -= mapadr + mapsize - vadr;
728                                 vadr = mapadr + mapsize;
729                                 goto start;
730                         }
731                 }
732         }
733
734         return 0;
735 }
736
737 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
738 {
739         void *ds = (void *)daram_base;
740         void *de = (void *)daram_base + daram_size;
741         void *ss = (void *)saram_base;
742         void *se = (void *)saram_base + saram_size;
743         int ret;
744
745         if ((vadr >= ds) && (vadr < de)) {
746                 if (vadr + len > de)
747                         return MEM_TYPE_CROSSING;
748                 else
749                         return MEM_TYPE_DARAM;
750         } else if ((vadr >= ss) && (vadr < se)) {
751                 if (vadr + len > se)
752                         return MEM_TYPE_CROSSING;
753                 else
754                         return MEM_TYPE_SARAM;
755         } else {
756                 down_read(&exmap_sem);
757                 if (exmap_valid(vadr, len))
758                         ret = MEM_TYPE_EXTERN;
759                 else
760                         ret = MEM_TYPE_NONE;
761                 up_read(&exmap_sem);
762                 return ret;
763         }
764 }
765
766 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
767 {
768         if (dsp_mem_type(p, len) <= 0) {
769                 if (fmt != NULL) {
770                         char s[64];
771                         va_list args;
772
773                         va_start(args, fmt);
774                         vsprintf(s, fmt, args);
775                         va_end(args);
776                         printk(KERN_ERR
777                "omapdsp: %s address(0x%p) and size(0x%x) are "
778                "not valid!\n"
779                "         (crossing different types of memories, or \n"
780                                "          external memory space where no "
781                                "actual memory is mapped)\n",
782                                s, p, len);
783                 }
784                 return -1;
785         }
786
787         return 0;
788 }
789
790 /*
791  * exmap_use(), unuse():
792  * when the mapped area is exported to user space with mmap,
793  * the usecount is incremented.
794  * while the usecount > 0, that area can't be released.
795  */
796 void exmap_use(void *vadr, size_t len)
797 {
798         int i;
799
800         down_write(&exmap_sem);
801         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
802                 void *mapadr;
803                 unsigned long mapsize;
804                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
805
806                 if (!ent->valid)
807                         continue;
808                 mapadr = (void *)ent->vadr;
809                 mapsize = 1 << (ent->order + PAGE_SHIFT);
810                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
811                         ent->usecount++;
812         }
813         up_write(&exmap_sem);
814 }
815
816 void exmap_unuse(void *vadr, size_t len)
817 {
818         int i;
819
820         down_write(&exmap_sem);
821         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
822                 void *mapadr;
823                 unsigned long mapsize;
824                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
825
826                 if (!ent->valid)
827                         continue;
828                 mapadr = (void *)ent->vadr;
829                 mapsize = 1 << (ent->order + PAGE_SHIFT);
830                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
831                         ent->usecount--;
832         }
833         up_write(&exmap_sem);
834 }
835
836 /*
837  * dsp_virt_to_phys()
838  * returns physical address, and sets len to valid length
839  */
840 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
841 {
842         int i;
843
844         if (is_dsp_internal_mem(vadr)) {
845                 /* DARAM or SARAM */
846                 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
847                 return (unsigned long)vadr;
848         }
849
850         /* EXRAM */
851         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
852                 void *mapadr;
853                 unsigned long mapsize;
854                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
855
856                 if (!ent->valid)
857                         continue;
858                 mapadr = (void *)ent->vadr;
859                 mapsize = 1 << (ent->order + PAGE_SHIFT);
860                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
861                         *len = mapadr + mapsize - vadr;
862                         return __pa(ent->buf) + vadr - mapadr;
863                 }
864         }
865
866         /* valid mapping not found */
867         return 0;
868 }
869
870 /*
871  * DSP MMU operations
872  */
873 #ifdef CONFIG_ARCH_OMAP1
874 static dsp_mmu_reg_t get_cam_l_va_mask(dsp_mmu_reg_t pgsz)
875 {
876         switch (pgsz) {
877         case DSP_MMU_CAM_PAGESIZE_1MB:
878                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
879                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
880         case DSP_MMU_CAM_PAGESIZE_64KB:
881                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
882                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
883         case DSP_MMU_CAM_PAGESIZE_4KB:
884                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
885                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
886         case DSP_MMU_CAM_PAGESIZE_1KB:
887                 return DSP_MMU_CAM_L_VA_TAG_L1_MASK |
888                        DSP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
889         }
890         return 0;
891 }
892 #endif /* CONFIG_ARCH_OMAP1 */
893
894 #if defined(CONFIG_ARCH_OMAP1)
895 #define get_cam_va_mask(pgsz) \
896         ((u32)DSP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
897          (u32)get_cam_l_va_mask(pgsz) << 6)
898 #elif defined(CONFIG_ARCH_OMAP2)
899 #define get_cam_va_mask(pgsz) \
900         ((pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
901          (pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
902          (pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
903          (pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)
904 #endif /* CONFIG_ARCH_OMAP2 */
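/*
 * get_cam_va_mask() gives the significant VA tag bits for a page size;
 * dsp_mmu_load_tlb() rejects a va with bits set outside this mask, i.e.
 * one that is not aligned to its own page size (e.g. 0xffff0000 for a
 * 64KB page on OMAP2).
 */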
905
906 static void get_tlb_lock(struct tlb_lock *tlb_lock)
907 {
908         dsp_mmu_reg_t lock = dsp_mmu_read_reg(DSP_MMU_LOCK);
909
910         tlb_lock->base = (lock & DSP_MMU_LOCK_BASE_MASK) >>
911                          DSP_MMU_LOCK_BASE_SHIFT;
912         tlb_lock->victim = (lock & DSP_MMU_LOCK_VICTIM_MASK) >>
913                            DSP_MMU_LOCK_VICTIM_SHIFT;
914 }
915
916 static void set_tlb_lock(struct tlb_lock *tlb_lock)
917 {
918         dsp_mmu_write_reg((tlb_lock->base   << DSP_MMU_LOCK_BASE_SHIFT) |
919                           (tlb_lock->victim << DSP_MMU_LOCK_VICTIM_SHIFT),
920                           DSP_MMU_LOCK);
921 }
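/*
 * In the LOCK register, entries below 'base' are protected from hardware
 * TLB replacement and 'victim' selects the line that the next load/read
 * operation targets.  dsp_mmu_load_tlb() below reuses an invalidated line
 * inside the locked region when it finds one, otherwise it loads at 'base'
 * and grows the locked region by one.
 */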
922
923 static void __read_tlb(struct tlb_lock *tlb_lock, struct cam_ram_regset *cr)
924 {
925         /* set victim */
926         set_tlb_lock(tlb_lock);
927
928 #if defined(CONFIG_ARCH_OMAP1)
929         /* read a TLB entry */
930         dsp_mmu_write_reg(DSP_MMU_LD_TLB_RD, DSP_MMU_LD_TLB);
931
932         cr->cam_h = dsp_mmu_read_reg(DSP_MMU_READ_CAM_H);
933         cr->cam_l = dsp_mmu_read_reg(DSP_MMU_READ_CAM_L);
934         cr->ram_h = dsp_mmu_read_reg(DSP_MMU_READ_RAM_H);
935         cr->ram_l = dsp_mmu_read_reg(DSP_MMU_READ_RAM_L);
936 #elif defined(CONFIG_ARCH_OMAP2)
937         cr->cam = dsp_mmu_read_reg(DSP_MMU_READ_CAM);
938         cr->ram = dsp_mmu_read_reg(DSP_MMU_READ_RAM);
939 #endif
940 }
941
942 static void __load_tlb(struct cam_ram_regset *cr)
943 {
944 #if defined(CONFIG_ARCH_OMAP1)
945         dsp_mmu_write_reg(cr->cam_h, DSP_MMU_CAM_H);
946         dsp_mmu_write_reg(cr->cam_l, DSP_MMU_CAM_L);
947         dsp_mmu_write_reg(cr->ram_h, DSP_MMU_RAM_H);
948         dsp_mmu_write_reg(cr->ram_l, DSP_MMU_RAM_L);
949 #elif defined(CONFIG_ARCH_OMAP2)
950         dsp_mmu_write_reg(cr->cam | DSP_MMU_CAM_V, DSP_MMU_CAM);
951         dsp_mmu_write_reg(cr->ram, DSP_MMU_RAM);
952 #endif
953
954         /* flush the entry */
955         dsp_mmu_flush();
956
957         /* load a TLB entry */
958         dsp_mmu_write_reg(DSP_MMU_LD_TLB_LD, DSP_MMU_LD_TLB);
959 }
960
961 static int dsp_mmu_load_tlb(struct tlb_entry *tlb_ent)
962 {
963         struct tlb_lock tlb_lock;
964         struct cam_ram_regset cr;
965
966 #ifdef CONFIG_ARCH_OMAP1
967         clk_enable(dsp_ck_handle);
968         omap_dsp_request_mem();
969 #endif
970
971         get_tlb_lock(&tlb_lock);
972         for (tlb_lock.victim = 0;
973              tlb_lock.victim < tlb_lock.base;
974              tlb_lock.victim++) {
975                 struct cam_ram_regset tmp_cr;
976
977                 /* read a TLB entry */
978                 __read_tlb(&tlb_lock, &tmp_cr);
979                 if (!cam_ram_valid(tmp_cr))
980                         goto found_victim;
981         }
982         set_tlb_lock(&tlb_lock);
983
984 found_victim:
985         /* The last (31st) entry cannot be locked? */
986         if (tlb_lock.victim == 31) {
987                 printk(KERN_ERR "omapdsp: TLB is full.\n");
988                 return -EBUSY;
989         }
990
991         if (tlb_ent->va & ~get_cam_va_mask(tlb_ent->pgsz)) {
992                 printk(KERN_ERR
993                        "omapdsp: mapping vadr (0x%06x) is not on an "
994                        "aligned boundary\n", tlb_ent->va);
995                 return -EINVAL;
996         }
997
998 #if defined(CONFIG_ARCH_OMAP1)
999         cr.cam_h = tlb_ent->va >> 22;
1000         cr.cam_l = (tlb_ent->va >> 6 & get_cam_l_va_mask(tlb_ent->pgsz)) |
1001                    tlb_ent->prsvd | tlb_ent->pgsz;
1002         cr.ram_h = tlb_ent->pa >> 16;
1003         cr.ram_l = (tlb_ent->pa & DSP_MMU_RAM_L_RAM_LSB_MASK) | tlb_ent->ap;
1004 #elif defined(CONFIG_ARCH_OMAP2)
1005         cr.cam = (tlb_ent->va & DSP_MMU_CAM_VATAG_MASK) |
1006                  tlb_ent->prsvd | tlb_ent->pgsz;
1007         cr.ram = tlb_ent->pa | tlb_ent->endian | tlb_ent->elsz;
1008 #endif
1009         __load_tlb(&cr);
1010
1011         /* update lock base */
1012         if (tlb_lock.victim == tlb_lock.base)
1013                 tlb_lock.base++;
1014         tlb_lock.victim = tlb_lock.base;
1015         set_tlb_lock(&tlb_lock);
1016
1017 #ifdef CONFIG_ARCH_OMAP1
1018         omap_dsp_release_mem();
1019         clk_disable(dsp_ck_handle);
1020 #endif
1021         return 0;
1022 }
1023
1024 static int dsp_mmu_clear_tlb(dsp_long_t vadr)
1025 {
1026         struct tlb_lock tlb_lock;
1027         int i;
1028         int max_valid = 0;
1029
1030 #ifdef CONFIG_ARCH_OMAP1
1031         clk_enable(dsp_ck_handle);
1032         omap_dsp_request_mem();
1033 #endif
1034
1035         get_tlb_lock(&tlb_lock);
1036         for (i = 0; i < tlb_lock.base; i++) {
1037                 struct cam_ram_regset cr;
1038                 dsp_long_t cam_va;
1039                 dsp_mmu_reg_t pgsz;
1040
1041                 /* read a TLB entry */
1042                 tlb_lock.victim = i;
1043                 __read_tlb(&tlb_lock, &cr);
1044                 if (!cam_ram_valid(cr))
1045                         continue;
1046
1047 #if defined(CONFIG_ARCH_OMAP1)
1048                 pgsz = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
1049                 cam_va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
1050                          (u32)(cr.cam_l & get_cam_l_va_mask(pgsz)) << 6;
1051 #elif defined(CONFIG_ARCH_OMAP2)
1052                 pgsz = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
1053                 cam_va = cr.cam & get_cam_va_mask(pgsz);
1054 #endif
1055
1056                 if (cam_va == vadr)
1057                         /* flush the entry */
1058                         dsp_mmu_flush();
1059                 else
1060                         max_valid = i;
1061         }
1062
1063         /* set new lock base */
1064         tlb_lock.base   = max_valid + 1;
1065         tlb_lock.victim = max_valid + 1;
1066         set_tlb_lock(&tlb_lock);
1067
1068 #ifdef CONFIG_ARCH_OMAP1
1069         omap_dsp_release_mem();
1070         clk_disable(dsp_ck_handle);
1071 #endif
1072         return 0;
1073 }
1074
1075 static void dsp_mmu_gflush(void)
1076 {
1077         struct tlb_lock tlb_lock;
1078
1079 #ifdef CONFIG_ARCH_OMAP1
1080         clk_enable(dsp_ck_handle);
1081         omap_dsp_request_mem();
1082 #endif
1083
1084         __dsp_mmu_gflush();
1085         tlb_lock.base   = exmap_preserved_cnt;
1086         tlb_lock.victim = exmap_preserved_cnt;
1087         set_tlb_lock(&tlb_lock);
1088
1089 #ifdef CONFIG_ARCH_OMAP1
1090         omap_dsp_release_mem();
1091         clk_disable(dsp_ck_handle);
1092 #endif
1093 }
1094
1095 /*
1096  * dsp_exmap()
1097  *
1098  * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
1099  * In this case, the buffer for DSP is allocated in this routine,
1100  * then it is mapped.
1101  * On the other hand, for example - frame buffer sharing, calls
1102  * this function with padr set. It means some known address space
1103  * pointed with padr is going to be shared with DSP.
1104  */
1105 static int dsp_exmap(dsp_long_t dspadr, unsigned long padr, unsigned long size,
1106                      enum exmap_type_e type)
1107 {
1108         dsp_mmu_reg_t pgsz;
1109         void *buf;
1110         unsigned int order = 0;
1111         unsigned long unit;
1112         int prev = -1;
1113         dsp_long_t _dspadr = dspadr;
1114         unsigned long _padr = padr;
1115         void *_vadr = dspbyte_to_virt(dspadr);
1116         unsigned long _size = size;
1117         struct tlb_entry tlb_ent;
1118         struct exmap_tbl_entry *exmap_ent;
1119         int status;
1120         int idx;
1121         int i;
1122
1123 #define MINIMUM_PAGESZ  SZ_4KB
1124         /*
1125          * alignment check
1126          */
1127         if (!is_aligned(size, MINIMUM_PAGESZ)) {
1128                 printk(KERN_ERR
1129                        "omapdsp: size(0x%lx) is not a multiple of 4KB.\n", size);
1130                 return -EINVAL;
1131         }
1132         if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
1133                 printk(KERN_ERR
1134                        "omapdsp: DSP address(0x%x) is not aligned.\n", dspadr);
1135                 return -EINVAL;
1136         }
1137         if (!is_aligned(padr, MINIMUM_PAGESZ)) {
1138                 printk(KERN_ERR
1139                        "omapdsp: physical address(0x%lx) is not aligned.\n",
1140                        padr);
1141                 return -EINVAL;
1142         }
1143
1144         /* address validity check */
1145         if ((dspadr < dspmem_size) ||
1146             (dspadr >= DSPSPACE_SIZE) ||
1147             ((dspadr + size > DSP_INIT_PAGE) &&
1148              (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
1149                 printk(KERN_ERR
1150                        "omapdsp: illegal address/size for dsp_exmap().\n");
1151                 return -EINVAL;
1152         }
1153
1154         down_write(&exmap_sem);
1155
1156         /* overlap check */
1157         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1158                 unsigned long mapsize;
1159                 struct exmap_tbl_entry *tmp_ent = &exmap_tbl[i];
1160
1161                 if (!tmp_ent->valid)
1162                         continue;
1163                 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
1164                 if ((_vadr + size > tmp_ent->vadr) &&
1165                     (_vadr < tmp_ent->vadr + mapsize)) {
1166                         printk(KERN_ERR "omapdsp: exmap page overlap!\n");
1167                         up_write(&exmap_sem);
1168                         return -EINVAL;
1169                 }
1170         }
1171
1172 start:
1173         buf = NULL;
1174         /* Are there any free TLB lines?  */
1175         for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1176                 if (!exmap_tbl[idx].valid)
1177                         goto found_free;
1178         }
1179         printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
1180         status = -EBUSY;
1181         goto fail;
1182
1183 found_free:
1184         exmap_ent = &exmap_tbl[idx];
1185
1186         /*
1187          * we don't use
1188          * 1KB mappings on OMAP1 or
1189          * 16MB mappings on OMAP2.
1190          */
1191         if ((_size >= SZ_1MB) &&
1192             (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
1193             is_aligned(_dspadr, SZ_1MB)) {
1194                 unit = SZ_1MB;
1195                 pgsz = DSP_MMU_CAM_PAGESIZE_1MB;
1196         } else if ((_size >= SZ_64KB) &&
1197                    (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
1198                    is_aligned(_dspadr, SZ_64KB)) {
1199                 unit = SZ_64KB;
1200                 pgsz = DSP_MMU_CAM_PAGESIZE_64KB;
1201         } else {
1202                 unit = SZ_4KB;
1203                 pgsz = DSP_MMU_CAM_PAGESIZE_4KB;
1204         }
1205
1206         order = get_order(unit);
1207
1208         /* buffer allocation */
1209         if (type == EXMAP_TYPE_MEM) {
1210                 struct page *page, *ps, *pe;
1211
1212                 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
1213                         buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
1214                 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
1215                         buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
1216                 else {
1217                         buf = (void *)__get_dma_pages(GFP_KERNEL, order);
1218                         if (buf == NULL) {
1219                                 status = -ENOMEM;
1220                                 goto fail;
1221                         }
1222                 }
1223
1224                 /* mark the pages as reserved; this is needed for mmap */
1225                 ps = virt_to_page(buf);
1226                 pe = virt_to_page(buf + unit);
1227
1228                 for (page = ps; page < pe; page++)
1229                         SetPageReserved(page);
1230
1231                 _padr = __pa(buf);
1232         }
1233
1234         /*
1235          * mapping for ARM MMU:
1236          * we should not access the allocated memory through 'buf'
1237          * since this area must not be cached.
1238          */
1239         status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
1240         if (status < 0)
1241                 goto fail;
1242
1243         /* loading DSP TLB entry */
1244         INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
1245         status = dsp_mmu_load_tlb(&tlb_ent);
1246         if (status < 0) {
1247                 exmap_clear_armmmu((unsigned long)_vadr, unit);
1248                 goto fail;
1249         }
1250
1251         INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
1252         exmap_ent->link.prev = prev;
1253         if (prev >= 0)
1254                 exmap_tbl[prev].link.next = idx;
1255
1256         if ((_size -= unit) == 0) {     /* normal completion */
1257                 up_write(&exmap_sem);
1258                 return size;
1259         }
1260
1261         _dspadr += unit;
1262         _vadr   += unit;
1263         _padr = padr ? _padr + unit : 0;
1264         prev = idx;
1265         goto start;
1266
1267 fail:
1268         up_write(&exmap_sem);
1269         if (buf)
1270                 dsp_mem_free_pages((unsigned long)buf, order);
1271         dsp_exunmap(dspadr);
1272         return status;
1273 }
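/*
 * dsp_exmap() splits the request into the largest units that the current
 * DSP address, physical address and remaining size allow (1MB, 64KB, then
 * 4KB).  For EXMAP_TYPE_MEM each unit is allocated from the matching
 * mempool (or directly with __get_dma_pages()); every unit gets its own
 * exmap_tbl entry, chained via link.prev/next.  On success the originally
 * requested size is returned; on failure everything mapped so far is torn
 * down again through dsp_exunmap().
 */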
1274
1275 static unsigned long unmap_free_arm(struct exmap_tbl_entry *ent)
1276 {
1277         unsigned long size;
1278
1279         /* clearing ARM MMU */
1280         size = 1 << (ent->order + PAGE_SHIFT);
1281         exmap_clear_armmmu((unsigned long)ent->vadr, size);
1282
1283         /* freeing allocated memory */
1284         if (ent->type == EXMAP_TYPE_MEM) {
1285                 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1286                 printk(KERN_DEBUG
1287                        "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1288                        size, ent->buf);
1289         }
1290 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1291         else if (ent->type == EXMAP_TYPE_FB) {
1292                 int status;
1293                 if (omapfb_nb) {
1294                         status = omapfb_unregister_client(omapfb_nb);
1295                         if (!status)
1296                                 printk("omapfb_unregister_client(): "
1297                                        "success\n");
1298                         else
1299                                 printk("omapfb_unregister_client(): "
1300                                        "failure(%d)\n", status);
1301                         kfree(omapfb_nb);
1302                         omapfb_nb = NULL;
1303                         omapfb_ready = 0;
1304                 }
1305         }
1306 #endif
1307
1308         return size;
1309 }
1310
1311 static int dsp_exunmap(dsp_long_t dspadr)
1312 {
1313         void *vadr;
1314         unsigned long size;
1315         int total = 0;
1316         struct exmap_tbl_entry *ent;
1317         int idx;
1318
1319         vadr = dspbyte_to_virt(dspadr);
1320         down_write(&exmap_sem);
1321         for (idx = 0; idx < DSP_MMU_TLB_LINES; idx++) {
1322                 ent = &exmap_tbl[idx];
1323                 if ((!ent->valid) || ent->prsvd)
1324                         continue;
1325                 if (ent->vadr == vadr)
1326                         goto found_map;
1327         }
1328         up_write(&exmap_sem);
1329         printk(KERN_WARNING
1330                "omapdsp: address %06x not found in exmap_tbl.\n", dspadr);
1331         return -EINVAL;
1332
1333 found_map:
1334         if (ent->usecount > 0) {
1335                 printk(KERN_ERR
1336                        "omapdsp: exmap reference count is not 0.\n"
1337                        "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
1338                        idx, ent->vadr, ent->order, ent->usecount);
1339                 up_write(&exmap_sem);
1340                 return -EINVAL;
1341         }
1342         /* clearing DSP TLB entry */
1343         dsp_mmu_clear_tlb(dspadr);
1344
1345         /* clear ARM MMU and free buffer */
1346         size = unmap_free_arm(ent);
1347         ent->valid = 0;
1348         total += size;
1349
1350         /* we don't free PTEs */
1351
1352         /* flush TLB */
1353         flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
1354
1355         if ((idx = ent->link.next) < 0)
1356                 goto up_out;    /* normal completion */
1357         ent = &exmap_tbl[idx];
1358         dspadr += size;
1359         vadr   += size;
1360         if (ent->vadr == vadr)
1361                 goto found_map; /* continue */
1362
1363         printk(KERN_ERR
1364                "omapdsp: illegal exmap_tbl grouping!\n"
1365                "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1366                vadr, idx, ent->vadr);
1367         up_write(&exmap_sem);
1368         return -EINVAL;
1369
1370 up_out:
1371         up_write(&exmap_sem);
1372         return total;
1373 }
1374
1375 static void exmap_flush(void)
1376 {
1377         struct exmap_tbl_entry *ent;
1378         int i;
1379
1380         down_write(&exmap_sem);
1381
1382         /* clearing DSP TLB entry */
1383         dsp_mmu_gflush();
1384
1385         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
1386                 ent = &exmap_tbl[i];
1387                 if (ent->valid && (!ent->prsvd)) {
1388                         unmap_free_arm(ent);
1389                         ent->valid = 0;
1390                 }
1391         }
1392
1393         /* flush TLB */
1394         flush_tlb_kernel_range(dspmem_base + dspmem_size,
1395                                dspmem_base + DSPSPACE_SIZE);
1396         up_write(&exmap_sem);
1397 }
1398
1399 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1400 #ifndef CONFIG_FB
1401 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1402 #endif /* CONFIG_FB */
1403
1404 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1405 static int omapfb_notifier_cb(struct notifier_block *omapfb_nb,
1406                               unsigned long event, void *fbi)
1407 {
1408         /* XXX */
1409         printk("omapfb_notifier_cb(): event = %s\n",
1410                (event == OMAPFB_EVENT_READY)    ? "READY" :
1411                (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1412         if (event == OMAPFB_EVENT_READY)
1413                 omapfb_ready = 1;
1414         else if (event == OMAPFB_EVENT_DISABLED)
1415                 omapfb_ready = 0;
1416         return 0;
1417 }
1418 #endif
1419
1420 static int dsp_fbexport(dsp_long_t *dspadr)
1421 {
1422         dsp_long_t dspadr_actual;
1423         unsigned long padr_sys, padr, fbsz_sys, fbsz;
1424         int cnt;
1425 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1426         int status;
1427 #endif
1428
1429         printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1430
1431 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1432         if (omapfb_nb) {
1433                 printk(KERN_WARNING
1434                        "omapdsp: frame buffer has been exported already!\n");
1435                 return -EBUSY;
1436         }
1437 #endif
1438
1439         if (num_registered_fb == 0) {
1440                 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1441                 return -EINVAL;
1442         }
1443         if (num_registered_fb != 1) {
1444                 printk(KERN_INFO
1445                        "omapdsp: %d frame buffers found. we use the first one.\n",
1446                        num_registered_fb);
1447         }
1448         padr_sys = registered_fb[0]->fix.smem_start;
1449         fbsz_sys = registered_fb[0]->fix.smem_len;
1450         if (fbsz_sys == 0) {
1451                 printk(KERN_ERR
1452                        "omapdsp: framebuffer doesn't seem to be configured "
1453                        "correctly! (size=0)\n");
1454                 return -EINVAL;
1455         }
1456
1457         /*
1458          * align padr and fbsz to 4kB boundary
1459          * (should be noted to the user afterwards!)
1460          */
1461         padr = padr_sys & ~(SZ_4KB-1);
1462         fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1463
1464         /* line up dspadr offset with padr */
1465         dspadr_actual =
1466                 (fbsz > SZ_1MB) ?  lineup_offset(*dspadr, padr, SZ_1MB-1) :
1467                 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1468                 /* (fbsz > SZ_4KB) ? */ *dspadr;
1469         if (dspadr_actual != *dspadr)
1470                 printk(KERN_DEBUG
1471                        "omapdsp: actual dspadr for FBEXPORT = %08x\n",
1472                        dspadr_actual);
1473         *dspadr = dspadr_actual;
1474
1475         cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1476         if (cnt < 0) {
1477                 printk(KERN_ERR "omapdsp: exmap failure.\n");
1478                 return cnt;
1479         }
1480
1481         if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1482                 printk(KERN_WARNING
1483 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1484 "  !!  screen base address or size is not aligned in 4kB:           !!\n"
1485 "  !!    actual screen  adr = %08lx, size = %08lx             !!\n"
1486 "  !!    exporting      adr = %08lx, size = %08lx             !!\n"
1487 "  !!  Make sure that the framebuffer is allocated with 4kB-order!  !!\n"
1488 "  !!  Otherwise DSP can corrupt the kernel memory.                 !!\n"
1489 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1490                        padr_sys, fbsz_sys, padr, fbsz);
1491         }
1492
1493 #ifdef CONFIG_ARCH_OMAP1
1494         /* increase the DMA priority */
1495         set_emiff_dma_prio(15);
1496 #endif
1497
1498 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1499         omapfb_nb = kmalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1500         if (omapfb_nb == NULL) {
1501                 printk(KERN_ERR
1502                        "omapdsp: failed to allocate memory for omapfb_nb!\n");
1503                 dsp_exunmap(dspadr_actual);
1504                 return -ENOMEM;
1505         }
1506         status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1507         if (!status)
1508                 printk("omapfb_register_client(): success\n");
1509         else
1510                 printk("omapfb_register_client(): failure(%d)\n", status);
1511 #endif
1512
1513         return cnt;
1514 }
1515
1516 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1517
1518 static int dsp_fbexport(dsp_long_t *dspadr)
1519 {
1520         printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1521         return -EINVAL;
1522 }
1523
1524 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1525
1526 static void exmap_setup_preserved_mem_page(void *buf, dsp_long_t dspadr,
1527                                            int exmap_idx)
1528 {
1529         unsigned long phys;
1530         void *virt;
1531         struct tlb_entry tlb_ent;
1532
1533         phys = __pa(buf);
1534         virt = dspbyte_to_virt(dspadr);
1535         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1536         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], buf, virt);
1537         INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1538         dsp_mmu_load_tlb(&tlb_ent);
1539 }
1540
1541 static void exmap_clear_mem_page(dsp_long_t dspadr)
1542 {
1543         void *virt;
1544
1545         virt = dspbyte_to_virt(dspadr);
1546         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1547         /* DSP MMU is shutting down. not handled here. */
1548 }
1549
1550 #ifdef CONFIG_ARCH_OMAP2
1551 static void exmap_setup_iomap_page(unsigned long phys, unsigned long dsp_io_adr,
1552                                    int exmap_idx)
1553 {
1554         dsp_long_t dspadr;
1555         void *virt;
1556         struct tlb_entry tlb_ent;
1557
1558         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1559         virt = dspbyte_to_virt(dspadr);
1560         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1561         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(&exmap_tbl[exmap_idx], NULL, virt);
1562         INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
1563         dsp_mmu_load_tlb(&tlb_ent);
1564 }
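/*
 * dsp_io_adr looks like a DSP word I/O address (hence the << 1); with
 * IOMAP_VAL = 0x3f the page lands at DSP byte address
 * 0xfc0000 + (dsp_io_adr << 1).  For example the PRCM page at
 * dsp_io_adr 0x7000 appears at DSP byte address 0xfce000.
 */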
1565
1566 static void exmap_clear_iomap_page(unsigned long dsp_io_adr)
1567 {
1568         dsp_long_t dspadr;
1569         void *virt;
1570
1571         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
1572         virt = dspbyte_to_virt(dspadr);
1573         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1574         /* DSP MMU is shutting down. not handled here. */
1575 }
1576 #endif /* CONFIG_ARCH_OMAP2 */
1577
1578 #define OMAP2420_GPT5_BASE      (L4_24XX_BASE + 0x7c000)
1579 #define OMAP2420_GPT6_BASE      (L4_24XX_BASE + 0x7e000)
1580 #define OMAP2420_GPT7_BASE      (L4_24XX_BASE + 0x80000)
1581 #define OMAP2420_GPT8_BASE      (L4_24XX_BASE + 0x82000)
1582 #define OMAP24XX_EAC_BASE       (L4_24XX_BASE + 0x90000)
1583
1584 static int exmap_setup_preserved_entries(void)
1585 {
1586         int n = 0;
1587
1588         exmap_setup_preserved_mem_page(dspvect_page, DSP_INIT_PAGE, n++);
1589 #ifdef CONFIG_ARCH_OMAP2
1590         exmap_setup_iomap_page(OMAP24XX_PRCM_BASE,     0x7000, n++);
1591 #ifdef CONFIG_ARCH_OMAP2420
1592         exmap_setup_iomap_page(OMAP2420_GPT5_BASE,     0xe000, n++);
1593         exmap_setup_iomap_page(OMAP2420_GPT6_BASE,     0xe800, n++);
1594         exmap_setup_iomap_page(OMAP2420_GPT7_BASE,     0xf000, n++);
1595         exmap_setup_iomap_page(OMAP2420_GPT8_BASE,     0xf800, n++);
1596 #endif /* CONFIG_ARCH_OMAP2420 */
1597         exmap_setup_iomap_page(OMAP24XX_EAC_BASE,     0x10000, n++);
1598         exmap_setup_iomap_page(OMAP24XX_MAILBOX_BASE, 0x11000, n++);
1599 #endif /* CONFIG_ARCH_OMAP2 */
1600
1601         return n;
1602 }
1603
1604 static void exmap_clear_preserved_entries(void)
1605 {
1606         exmap_clear_mem_page(DSP_INIT_PAGE);
1607 #ifdef CONFIG_ARCH_OMAP2
1608         exmap_clear_iomap_page(0x7000);         /* PRCM */
1609 #ifdef CONFIG_ARCH_OMAP2420
1610         exmap_clear_iomap_page(0xe000);         /* GPT5 */
1611         exmap_clear_iomap_page(0xe800);         /* GPT6 */
1612         exmap_clear_iomap_page(0xf000);         /* GPT7 */
1613         exmap_clear_iomap_page(0xf800);         /* GPT8 */
1614 #endif /* CONFIG_ARCH_OMAP2420 */
1615         exmap_clear_iomap_page(0x10000);        /* EAC */
1616         exmap_clear_iomap_page(0x11000);        /* MAILBOX */
1617 #endif /* CONFIG_ARCH_OMAP2 */
1618 }
1619
1620 #ifdef CONFIG_ARCH_OMAP1
1621 static int dsp_mmu_itack(void)
1622 {
1623         unsigned long dspadr;
1624
1625         printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1626         if (!dsp_err_isset(ERRCODE_MMU)) {
1627                 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1628                 return -EINVAL;
1629         }
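        /*
         * Map a scratch page over the faulting address before acking,
         * presumably so the DSP does not fault again immediately, then
         * unmap it once the ack has been issued.
         */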
1630         dspadr = dsp_fault_adr & ~(SZ_4KB-1);
1631         dsp_exmap(dspadr, 0, SZ_4KB, EXMAP_TYPE_MEM);    /* FIXME: reserve TLB entry for this */
1632         printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1633         dsp_set_runlevel(RUNLEVEL_RECOVERY);
1634         __dsp_mmu_itack();
1635         udelay(100);
1636         dsp_exunmap(dspadr);
1637         dsp_err_clear(ERRCODE_MMU);
1638         return 0;
1639 }
1640 #endif /* CONFIG_ARCH_OMAP1 */
1641
1642 #ifdef CONFIG_ARCH_OMAP2
1643 #define MMU_IRQ_MASK \
1644         (DSP_MMU_IRQ_MULTIHITFAULT | \
1645          DSP_MMU_IRQ_TABLEWALKFAULT | \
1646          DSP_MMU_IRQ_EMUMISS | \
1647          DSP_MMU_IRQ_TRANSLATIONFAULT | \
1648          DSP_MMU_IRQ_TLBMISS)
1649 #endif
1650
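/*
 * Re-initialize the DSP MMU: disable/reset it, enable it again, reload
 * the preserved TLB entries and, on OMAP2, unmask the MMU fault
 * interrupts.
 */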
1651 static void dsp_mmu_init(void)
1652 {
1653         struct tlb_lock tlb_lock;
1654
1655 #ifdef CONFIG_ARCH_OMAP1
1656         clk_enable(dsp_ck_handle);
1657         omap_dsp_request_mem();
1658 #endif
1659         down_write(&exmap_sem);
1660
1661 #if defined(CONFIG_ARCH_OMAP1)
1662         dsp_mmu_disable();      /* clear all */
1663         udelay(100);
1664 #elif defined(CONFIG_ARCH_OMAP2)
1665         dsp_mmu_reset();
1666 #endif
1667         dsp_mmu_enable();
1668
1669         /* DSP TLB initialization */
1670         tlb_lock.base   = 0;
1671         tlb_lock.victim = 0;
1672         set_tlb_lock(&tlb_lock);
1673
1674         exmap_preserved_cnt = exmap_setup_preserved_entries();
1675
1676 #ifdef CONFIG_ARCH_OMAP2
1677         /* MMU IRQ mask setup */
1678         dsp_mmu_write_reg(MMU_IRQ_MASK, DSP_MMU_IRQENABLE);
1679 #endif
1680
1681         up_write(&exmap_sem);
1682 #ifdef CONFIG_ARCH_OMAP1
1683         omap_dsp_release_mem();
1684         clk_disable(dsp_ck_handle);
1685 #endif
1686 }
1687
1688 static void dsp_mmu_shutdown(void)
1689 {
1690         exmap_flush();
1691         exmap_clear_preserved_entries();
1692         dsp_mmu_disable();
1693 }
1694
1695 #ifdef CONFIG_ARCH_OMAP1
1696 /*
1697  * intmem_enable() / disable():
1698  * if the address is in DSP internal memories,
1699  * we send PM mailbox commands so that the DSP DMA domain does not go idle
1700  * while the ARM is accessing those memories.
1701  */
1702 static int intmem_enable(void)
1703 {
1704         int ret = 0;
1705
1706         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1707                 ret = mbcompose_send(PM, PM_ENABLE, DSPREG_ICR_DMA);
1708
1709         return ret;
1710 }
1711
1712 static void intmem_disable(void)
{
1713         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
1714                 mbcompose_send(PM, PM_DISABLE, DSPREG_ICR_DMA);
1715 }
1716 #endif /* CONFIG_ARCH_OMAP1 */
1717
1718 /*
1719  * dsp_mem_enable() / disable()
1720  */
1721 #ifdef CONFIG_ARCH_OMAP1
1722 int intmem_usecount;
1723 #endif
1724
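/*
 * For DSP internal memory, OMAP1 keeps a use count and holds the memory
 * request (omap_dsp_request_mem) while it is non-zero; for external
 * (exmap) memory, the exmap_sem read lock is taken instead, presumably so
 * that mappings cannot be torn down while an access is in progress.
 */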
1725 int dsp_mem_enable(void *adr)
1726 {
1727         int ret = 0;
1728
1729         if (is_dsp_internal_mem(adr)) {
1730 #ifdef CONFIG_ARCH_OMAP1
1731                 if (intmem_usecount++ == 0)
1732                         ret = omap_dsp_request_mem();
1733 #endif
1734         } else
1735                 down_read(&exmap_sem);
1736
1737         return ret;
1738 }
1739
1740 void dsp_mem_disable(void *adr)
1741 {
1742         if (is_dsp_internal_mem(adr)) {
1743 #ifdef CONFIG_ARCH_OMAP1
1744                 if (--intmem_usecount == 0)
1745                         omap_dsp_release_mem();
1746 #endif
1747         } else
1748                 up_read(&exmap_sem);
1749 }
1750
1751 /* for safety */
1752 #ifdef CONFIG_ARCH_OMAP1
1753 void dsp_mem_usecount_clear(void)
1754 {
1755         if (intmem_usecount != 0) {
1756                 printk(KERN_WARNING
1757                        "omapdsp: unbalanced memory request/release detected.\n"
1758                        "         intmem_usecount is not zero where it "
1759                        "should be! ... forcing it to zero.\n");
1760                 intmem_usecount = 0;
1761                 omap_dsp_release_mem();
1762         }
1763 }
1764 #endif /* CONFIG_ARCH_OMAP1 */
1765
1766 /*
1767  * dsp_mem file operations
1768  */
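/* only SEEK_SET (orig == 0) and SEEK_CUR (orig == 1) are supported;
   SEEK_END is rejected */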
1769 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1770 {
1771         loff_t ret;
1772
1773         mutex_lock(&file->f_dentry->d_inode->i_mutex);
1774         switch (orig) {
1775         case 0:
1776                 file->f_pos = offset;
1777                 ret = file->f_pos;
1778                 break;
1779         case 1:
1780                 file->f_pos += offset;
1781                 ret = file->f_pos;
1782                 break;
1783         default:
1784                 ret = -EINVAL;
1785         }
1786         mutex_unlock(&file->f_dentry->d_inode->i_mutex);
1787         return ret;
1788 }
1789
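/*
 * Reads and writes are split into internal-memory and external-memory
 * variants: the intmem_* ones clamp to dspmem_size and (on OMAP1) keep
 * api_ck enabled around the copy, while the exmem_* ones first check the
 * range against the current exmap mappings.
 */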
1790 static ssize_t intmem_read(struct file *file, char __user *buf, size_t count,
1791                            loff_t *ppos)
1792 {
1793         unsigned long p = *ppos;
1794         void *vadr = dspbyte_to_virt(p);
1795         ssize_t size = dspmem_size;
1796         ssize_t read;
1797
1798         if (p >= size)
1799                 return 0;
1800 #ifdef CONFIG_ARCH_OMAP1
1801         clk_enable(api_ck_handle);
1802 #endif
1803         read = count;
1804         if (count > size - p)
1805                 read = size - p;
1806         if (copy_to_user(buf, vadr, read)) {
1807                 read = -EFAULT;
1808                 goto out;
1809         }
1810         *ppos += read;
1811 out:
1812 #ifdef CONFIG_ARCH_OMAP1
1813         clk_disable(api_ck_handle);
1814 #endif
1815         return read;
1816 }
1817
1818 static ssize_t exmem_read(struct file *file, char __user *buf, size_t count,
1819                           loff_t *ppos)
1820 {
1821         unsigned long p = *ppos;
1822         void *vadr = dspbyte_to_virt(p);
1823
1824         if (!exmap_valid(vadr, count)) {
1825                 printk(KERN_ERR
1826                        "omapdsp: DSP address %08lx / size %08x "
1827                        "is not valid!\n", p, count);
1828                 return -EFAULT;
1829         }
1830         if (count > DSPSPACE_SIZE - p)
1831                 count = DSPSPACE_SIZE - p;
1832         if (copy_to_user(buf, vadr, count))
1833                 return -EFAULT;
1834         *ppos += count;
1835
1836         return count;
1837 }
1838
1839 static ssize_t dsp_mem_read(struct file *file, char __user *buf, size_t count,
1840                             loff_t *ppos)
1841 {
1842         int ret;
1843         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1844
1845         if (dsp_mem_enable(vadr) < 0)
1846                 return -EBUSY;
1847         if (is_dspbyte_internal_mem(*ppos))
1848                 ret = intmem_read(file, buf, count, ppos);
1849         else
1850                 ret = exmem_read(file, buf, count, ppos);
1851         dsp_mem_disable(vadr);
1852
1853         return ret;
1854 }
1855
1856 static ssize_t intmem_write(struct file *file, const char __user *buf,
1857                             size_t count, loff_t *ppos)
1858 {
1859         unsigned long p = *ppos;
1860         void *vadr = dspbyte_to_virt(p);
1861         ssize_t size = dspmem_size;
1862         ssize_t written;
1863
1864         if (p >= size)
1865                 return 0;
1866 #ifdef CONFIG_ARCH_OMAP1
1867         clk_enable(api_ck_handle);
1868 #endif
1869         written = count;
1870         if (count > size - p)
1871                 written = size - p;
1872         if (copy_from_user(vadr, buf, written)) {
1873                 written = -EFAULT;
1874                 goto out;
1875         }
1876         *ppos += written;
1877 out:
1878 #ifdef CONFIG_ARCH_OMAP1
1879         clk_disable(api_ck_handle);
1880 #endif
1881         return written;
1882 }
1883
1884 static ssize_t exmem_write(struct file *file, const char __user *buf,
1885                            size_t count, loff_t *ppos)
1886 {
1887         unsigned long p = *ppos;
1888         void *vadr = dspbyte_to_virt(p);
1889
1890         if (!exmap_valid(vadr, count)) {
1891                 printk(KERN_ERR
1892                        "omapdsp: DSP address %08lx / size %08x "
1893                        "is not valid!\n", p, count);
1894                 return -EFAULT;
1895         }
1896         if (count > DSPSPACE_SIZE - p)
1897                 count = DSPSPACE_SIZE - p;
1898         if (copy_from_user(vadr, buf, count))
1899                 return -EFAULT;
1900         *ppos += count;
1901
1902         return count;
1903 }
1904
1905 static ssize_t dsp_mem_write(struct file *file, const char __user *buf,
1906                              size_t count, loff_t *ppos)
1907 {
1908         int ret;
1909         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1910
1911         if (dsp_mem_enable(vadr) < 0)
1912                 return -EBUSY;
1913         if (is_dspbyte_internal_mem(*ppos))
1914                 ret = intmem_write(file, buf, count, ppos);
1915         else
1916                 ret = exmem_write(file, buf, count, ppos);
1917         dsp_mem_disable(vadr);
1918
1919         return ret;
1920 }
1921
1922 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1923                          unsigned int cmd, unsigned long arg)
1924 {
1925         switch (cmd) {
1926         case MEM_IOCTL_MMUINIT:
1927                 dsp_mmu_init();
1928                 return 0;
1929
1930         case MEM_IOCTL_EXMAP:
1931                 {
1932                         struct omap_dsp_mapinfo mapinfo;
1933                         if (copy_from_user(&mapinfo, (void __user *)arg,
1934                                            sizeof(mapinfo)))
1935                                 return -EFAULT;
1936                         return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1937                                          EXMAP_TYPE_MEM);
1938                 }
1939
1940         case MEM_IOCTL_EXUNMAP:
1941                 return dsp_exunmap((unsigned long)arg);
1942
1943         case MEM_IOCTL_EXMAP_FLUSH:
1944                 exmap_flush();
1945                 return 0;
1946
1947         case MEM_IOCTL_FBEXPORT:
1948                 {
1949                         dsp_long_t dspadr;
1950                         int ret;
1951                         if (copy_from_user(&dspadr, (void __user *)arg,
1952                                            sizeof(dsp_long_t)))
1953                                 return -EFAULT;
1954                         ret = dsp_fbexport(&dspadr);
1955                         if (copy_to_user((void __user *)arg, &dspadr,
1956                                          sizeof(dsp_long_t)))
1957                                 return -EFAULT;
1958                         return ret;
1959                 }
1960
1961 #ifdef CONFIG_ARCH_OMAP1
1962         case MEM_IOCTL_MMUITACK:
1963                 return dsp_mmu_itack();
1964 #endif
1965
1966         case MEM_IOCTL_KMEM_RESERVE:
1967                 {
1968                         __u32 size;
1969                         if (copy_from_user(&size, (void __user *)arg,
1970                                            sizeof(__u32)))
1971                                 return -EFAULT;
1972                         return dsp_kmem_reserve(size);
1973                 }
1974
1975         case MEM_IOCTL_KMEM_RELEASE:
1976                 dsp_kmem_release();
1977                 return 0;
1978
1979         default:
1980                 return -ENOIOCTLCMD;
1981         }
1982 }
1983
1984 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
1985 {
1986         /*
1987          * FIXME
1988          */
1989         return -ENOSYS;
1990 }
1991
1992 static int dsp_mem_open(struct inode *inode, struct file *file)
1993 {
1994         if (!capable(CAP_SYS_RAWIO))
1995                 return -EPERM;
1996
1997         return 0;
1998 }
1999
2000 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
2001 /*
2002  * fb update functions:
2003  * fbupd_response() is executed by the workqueue.
2004  * fbupd_cb() is called when fb update is done, in interrupt context.
2005  * mbox_fbupd() is called when KFUNC:FBCTL:UPD is received from DSP.
2006  */
2007 static void fbupd_response(void *arg)
2008 {
2009         int status;
2010
2011         status = mbcompose_send(KFUNC, KFUNC_FBCTL, FBCTL_UPD);
2012         if (status < 0) {
2013                 /* FIXME: DSP is busy !! */
2014                 printk(KERN_ERR
2015                        "omapdsp: DSP is busy when trying to send FBCTL:UPD "
2016                        "response!\n");
2017         }
2018 }
2019
2020 static DECLARE_WORK(fbupd_response_work, (void (*)(void *))fbupd_response,
2021                     NULL);
2022
2023 static void fbupd_cb(void *arg)
2024 {
2025         schedule_work(&fbupd_response_work);
2026 }
2027
2028 void mbox_fbctl_upd(void)
2029 {
2030         struct omapfb_update_window win;
2031         volatile unsigned short *buf = ipbuf_sys_da->d;
2032
2033         /* FIXME: try count sometimes exceeds 1000. */
2034         if (sync_with_dsp(&ipbuf_sys_da->s, TID_ANON, 5000) < 0) {
2035                 printk(KERN_ERR "mbox: FBCTL:UPD - IPBUF sync failed!\n");
2036                 return;
2037         }
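        /*
         * the DSP passes the update window as five 16-bit words:
         * x, y, width, height, format
         */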
2038         win.x = buf[0];
2039         win.y = buf[1];
2040         win.width = buf[2];
2041         win.height = buf[3];
2042         win.format = buf[4];
2043         release_ipbuf_pvt(ipbuf_sys_da);
2044
2045         if (!omapfb_ready) {
2046                 printk(KERN_WARNING
2047                        "omapdsp: fbupd() called while HWA742 is not ready!\n");
2048                 return;
2049         }
2050         //printk("calling omapfb_update_window_async()\n");
2051         omapfb_update_window_async(registered_fb[1], &win, fbupd_cb, NULL);
2052 }
2053
2054 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
2055
2056 void mbox_fbctl_upd(void)
2057 {
2058 }
2059 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
2060
2061 /*
2062  * sysfs files
2063  */
2064
2065 /* mmu */
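/*
 * Dump the DSP MMU TLB: each line is read back by pointing the victim
 * index at it, and the original lock/victim setting is restored at the
 * end.
 */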
2066 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
2067                         char *buf)
2068 {
2069         int len;
2070         struct tlb_lock tlb_lock_org;
2071         int i;
2072
2073 #ifdef CONFIG_ARCH_OMAP1
2074         clk_enable(dsp_ck_handle);
2075         omap_dsp_request_mem();
2076 #endif
2077         down_read(&exmap_sem);
2078
2079         get_tlb_lock(&tlb_lock_org);
2080
2081 #if defined(CONFIG_ARCH_OMAP1)
2082         len = sprintf(buf, "P: preserved, V: valid\n"
2083                            "ety P V size   cam_va     ram_pa ap\n");
2084                          /* 00: P V  4KB 0x300000 0x10171800 FA */
2085 #elif defined(CONFIG_ARCH_OMAP2)
2086         len = sprintf(buf, "P: preserved, V: valid\n"
2087                            "B: big endian, L:little endian, "
2088                            "M: mixed page attribute\n"
2089                            "ety P V size   cam_va     ram_pa E ES M\n");
2090                          /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
2091 #endif
2092
2093         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2094                 struct cam_ram_regset cr;
2095                 struct tlb_lock tlb_lock_tmp;
2096                 struct tlb_entry ent;
2097 #if defined(CONFIG_ARCH_OMAP1)
2098                 char *pgsz_str, *ap_str;
2099 #elif defined(CONFIG_ARCH_OMAP2)
2100                 char *pgsz_str, *elsz_str;
2101 #endif
2102
2103                 /* read a TLB entry */
2104                 tlb_lock_tmp.base   = tlb_lock_org.base;
2105                 tlb_lock_tmp.victim = i;
2106                 __read_tlb(&tlb_lock_tmp, &cr);
2107
2108 #if defined(CONFIG_ARCH_OMAP1)
2109                 ent.pgsz  = cr.cam_l & DSP_MMU_CAM_PAGESIZE_MASK;
2110                 ent.prsvd = cr.cam_l & DSP_MMU_CAM_P;
2111                 ent.valid = cr.cam_l & DSP_MMU_CAM_V;
2112                 ent.ap    = cr.ram_l & DSP_MMU_RAM_L_AP_MASK;
2113                 ent.va = (u32)(cr.cam_h & DSP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
2114                          (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
2115                 ent.pa = (unsigned long)cr.ram_h << 16 |
2116                          (cr.ram_l & DSP_MMU_RAM_L_RAM_LSB_MASK);
2117
2118                 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
2119                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2120                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
2121                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB":
2122                                                                      " ???";
2123                 ap_str = (ent.ap == DSP_MMU_RAM_L_AP_RO) ? "RO":
2124                          (ent.ap == DSP_MMU_RAM_L_AP_FA) ? "FA":
2125                          (ent.ap == DSP_MMU_RAM_L_AP_NA) ? "NA":
2126                                                            "??";
2127 #elif defined(CONFIG_ARCH_OMAP2)
2128                 ent.pgsz   = cr.cam & DSP_MMU_CAM_PAGESIZE_MASK;
2129                 ent.prsvd  = cr.cam & DSP_MMU_CAM_P;
2130                 ent.valid  = cr.cam & DSP_MMU_CAM_V;
2131                 ent.va     = cr.cam & DSP_MMU_CAM_VATAG_MASK;
2132                 ent.endian = cr.ram & DSP_MMU_RAM_ENDIANNESS;
2133                 ent.elsz   = cr.ram & DSP_MMU_RAM_ELEMENTSIZE_MASK;
2134                 ent.pa     = cr.ram & DSP_MMU_RAM_PADDR_MASK;
2135                 ent.mixed  = cr.ram & DSP_MMU_RAM_MIXED;
2136
2137                 pgsz_str = (ent.pgsz == DSP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
2138                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
2139                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
2140                            (ent.pgsz == DSP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
2141                                                                      " ???";
2142                 elsz_str = (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
2143                            (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_16) ? "16":
2144                            (ent.elsz == DSP_MMU_RAM_ELEMENTSIZE_32) ? "32":
2145                                                                       "??";
2146 #endif
2147
2148                 if (i == tlb_lock_org.base)
2149                         len += sprintf(buf + len, "lock base = %d\n",
2150                                        tlb_lock_org.base);
2151                 if (i == tlb_lock_org.victim)
2152                         len += sprintf(buf + len, "victim    = %d\n",
2153                                        tlb_lock_org.victim);
2154 #if defined(CONFIG_ARCH_OMAP1)
2155                 len += sprintf(buf + len,
2156                                /* 00: P V  4KB 0x300000 0x10171800 FA */
2157                                "%02d: %c %c %s 0x%06x 0x%08lx %s\n",
2158                                i,
2159                                ent.prsvd ? 'P' : ' ',
2160                                ent.valid ? 'V' : ' ',
2161                                pgsz_str, ent.va, ent.pa, ap_str);
2162 #elif defined(CONFIG_ARCH_OMAP2)
2163                 len += sprintf(buf + len,
2164                                /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
2165                                "%02d: %c %c %s 0x%06x 0x%08lx %c %s %c\n",
2166                                i,
2167                                ent.prsvd ? 'P' : ' ',
2168                                ent.valid ? 'V' : ' ',
2169                                pgsz_str, ent.va, ent.pa,
2170                                ent.endian ? 'B' : 'L',
2171                                elsz_str,
2172                                ent.mixed ? 'M' : ' ');
2173 #endif /* CONFIG_ARCH_OMAP2 */
2174         }
2175
2176         /* restore victim entry */
2177         set_tlb_lock(&tlb_lock_org);
2178
2179         up_read(&exmap_sem);
2180 #ifdef CONFIG_ARCH_OMAP1
2181         omap_dsp_release_mem();
2182         clk_disable(dsp_ck_handle);
2183 #endif
2184         return len;
2185 }
2186
2187 /* exmap */
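/*
 * Dump the exmap table: entries belonging to one mapping are chained via
 * link.prev/link.next, so only chain heads (link.prev < 0) are printed,
 * with the sizes of all chained entries summed up.
 */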
2188 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
2189                           char *buf)
2190 {
2191         int len;
2192         int i;
2193
2194         down_read(&exmap_sem);
2195         len = sprintf(buf, "  dspadr     size         buf     size uc\n");
2196                          /* 0x300000 0x123000  0xc0171000 0x100000  0*/
2197         for (i = 0; i < DSP_MMU_TLB_LINES; i++) {
2198                 struct exmap_tbl_entry *ent = &exmap_tbl[i];
2199                 void *vadr;
2200                 unsigned long size;
2201                 enum exmap_type_e type;
2202                 int idx;
2203
2204                 /* find a top of link */
2205                 if (!ent->valid || (ent->link.prev >= 0))
2206                         continue;
2207
2208                 vadr = ent->vadr;
2209                 type = ent->type;
2210                 size = 0;
2211                 idx = i;
2212                 do {
2213                         ent = &exmap_tbl[idx];
2214                         size += PAGE_SIZE << ent->order;
2215                 } while ((idx = ent->link.next) >= 0);
2216
2217                 len += sprintf(buf + len, "0x%06x %#8lx",
2218                                virt_to_dspbyte(vadr), size);
2219
2220                 if (type == EXMAP_TYPE_FB) {
2221                         len += sprintf(buf + len, "    framebuf\n");
2222                 } else {
2223                         len += sprintf(buf + len, "\n");
2224                         idx = i;
2225                         do {
2226                                 ent = &exmap_tbl[idx];
2227                                 len += sprintf(buf + len,
2228                                                /* 0xc0171000 0x100000  0*/
2229                                                "%19s0x%8p %#8lx %2d\n",
2230                                                "", ent->buf,
2231                                                PAGE_SIZE << ent->order,
2232                                                ent->usecount);
2233                         } while ((idx = ent->link.next) >= 0);
2234                 }
2235         }
2236
2237         up_read(&exmap_sem);
2238         return len;
2239 }
2240
2241 /* mempool */
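/*
 * Show the reserved kernel memory pools: the hex value is the total
 * reserved size, followed by the number of 1MB and 64KB buffers and how
 * many of each are currently free.
 */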
2242 static ssize_t mempool_show(struct device *dev, struct device_attribute *attr,
2243                             char *buf)
2244 {
2245         int min_nr_1M = 0, curr_nr_1M = 0;
2246         int min_nr_64K = 0, curr_nr_64K = 0;
2247         int total = 0;
2248
2249         if (likely(kmem_pool_1M)) {
2250                 min_nr_1M  = kmem_pool_1M->min_nr;
2251                 curr_nr_1M = kmem_pool_1M->curr_nr;
2252                 total += min_nr_1M * SZ_1MB;
2253         }
2254         if (likely(kmem_pool_64K)) {
2255                 min_nr_64K  = kmem_pool_64K->min_nr;
2256                 curr_nr_64K = kmem_pool_64K->curr_nr;
2257                 total += min_nr_64K * SZ_64KB;
2258         }
2259
2260         return sprintf(buf,
2261                        "0x%x\n"
2262                        "1M  buffer: %d (%d free)\n"
2263                        "64K buffer: %d (%d free)\n",
2264                        total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
2265 }
2266
2267 /*
2268  * workqueue for mmu int
2269  */
2270 #ifdef CONFIG_ARCH_OMAP1
2271 /*
2272  * MMU fault mask:
2273  * We ignore prefetch err.
2274  */
2275 #define MMUFAULT_MASK \
2276         (DSP_MMU_FAULT_ST_PERM |\
2277          DSP_MMU_FAULT_ST_TLB_MISS |\
2278          DSP_MMU_FAULT_ST_TRANS)
2279 #endif /* CONFIG_ARCH_OMAP1 */
2280
2281 static void do_mmu_int(void)
2282 {
2283 #if defined(CONFIG_ARCH_OMAP1)
2284
2285         dsp_mmu_reg_t status;
2286         dsp_mmu_reg_t adh, adl;
2287         dsp_mmu_reg_t dp;
2288
2289         status = dsp_mmu_read_reg(DSP_MMU_FAULT_ST);
2290         adh = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_H);
2291         adl = dsp_mmu_read_reg(DSP_MMU_FAULT_AD_L);
2292         dp = adh & DSP_MMU_FAULT_AD_H_DP;
2293         dsp_fault_adr = MK32(adh & DSP_MMU_FAULT_AD_H_ADR_MASK, adl);
2294
2295         /* if the fault is masked, nothing to do */
2296         if ((status & MMUFAULT_MASK) == 0) {
2297                 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
2298                 /*
2299                  * Note: on OMAP1710, when the CACHE + DMA domain in the DSP
2300                  * comes out of idle, an MMU interrupt can occur with
2301                  * DSP_MMU_FAULT_ST not set.
2302                  * In that case we simply ignore the interrupt.
2303                  */
2304                 if (status) {
2305                         printk(KERN_DEBUG "%s%s%s%s\n",
2306                                (status & DSP_MMU_FAULT_ST_PREF)?
2307                                         "  (prefetch err)" : "",
2308                                (status & DSP_MMU_FAULT_ST_PERM)?
2309                                         "  (permission fault)" : "",
2310                                (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2311                                         "  (TLB miss)" : "",
2312                                (status & DSP_MMU_FAULT_ST_TRANS) ?
2313                                         "  (translation fault)": "");
2314                         printk(KERN_DEBUG "fault address = %#08x\n",
2315                                dsp_fault_adr);
2316                 }
2317                 enable_irq(INT_DSP_MMU);
2318                 return;
2319         }
2320
2321 #elif defined(CONFIG_ARCH_OMAP2)
2322
2323         dsp_mmu_reg_t status;
2324
2325         status = dsp_mmu_read_reg(DSP_MMU_IRQSTATUS);
2326         dsp_fault_adr = dsp_mmu_read_reg(DSP_MMU_FAULT_AD);
2327
2328 #endif /* CONFIG_ARCH_OMAP2 */
2329
2330         printk(KERN_INFO "DSP MMU interrupt!\n");
2331
2332 #if defined(CONFIG_ARCH_OMAP1)
2333
2334         printk(KERN_INFO "%s%s%s%s\n",
2335                (status & DSP_MMU_FAULT_ST_PREF)?
2336                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PREF)?
2337                                 "  prefetch err":
2338                                 "  (prefetch err)":
2339                                 "",
2340                (status & DSP_MMU_FAULT_ST_PERM)?
2341                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_PERM)?
2342                                 "  permission fault":
2343                                 "  (permission fault)":
2344                                 "",
2345                (status & DSP_MMU_FAULT_ST_TLB_MISS)?
2346                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TLB_MISS)?
2347                                 "  TLB miss":
2348                                 "  (TLB miss)":
2349                                 "",
2350                (status & DSP_MMU_FAULT_ST_TRANS)?
2351                         (MMUFAULT_MASK & DSP_MMU_FAULT_ST_TRANS)?
2352                                 "  translation fault":
2353                                 "  (translation fault)":
2354                                 "");
2355
2356 #elif defined(CONFIG_ARCH_OMAP2)
2357
2358         printk(KERN_INFO "%s%s%s%s%s\n",
2359                (status & DSP_MMU_IRQ_MULTIHITFAULT)?
2360                         (MMU_IRQ_MASK & DSP_MMU_IRQ_MULTIHITFAULT)?
2361                                 "  multi hit":
2362                                 "  (multi hit)":
2363                                 "",
2364                (status & DSP_MMU_IRQ_TABLEWALKFAULT)?
2365                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TABLEWALKFAULT)?
2366                                 "  table walk fault":
2367                                 "  (table walk fault)":
2368                                 "",
2369                (status & DSP_MMU_IRQ_EMUMISS)?
2370                         (MMU_IRQ_MASK & DSP_MMU_IRQ_EMUMISS)?
2371                                 "  EMU miss":
2372                                 "  (EMU miss)":
2373                                 "",
2374                (status & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2375                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TRANSLATIONFAULT)?
2376                                 "  translation fault":
2377                                 "  (translation fault)":
2378                                 "",
2379                (status & DSP_MMU_IRQ_TLBMISS)?
2380                         (MMU_IRQ_MASK & DSP_MMU_IRQ_TLBMISS)?
2381                                 "  TLB miss":
2382                                 "  (TLB miss)":
2383                                 "");
2384
2385 #endif /* CONFIG_ARCH_OMAP2 */
2386
2387         printk(KERN_INFO "fault address = %#08x\n", dsp_fault_adr);
2388
2389         if (dsp_cfgstat_get_stat() == CFGSTAT_READY)
2390                 dsp_err_set(ERRCODE_MMU, (unsigned long)dsp_fault_adr);
2391         else {
2392 #ifdef CONFIG_ARCH_OMAP1
2393                 __dsp_mmu_itack();
2394 #endif
2395                 printk(KERN_INFO "Resetting DSP...\n");
2396                 dsp_cpustat_request(CPUSTAT_RESET);
2397                 /*
2398                  * if the following calls are enabled, the semaphore lock must be avoided.
2399                  *
2400                 printk(KERN_INFO "Flushing DSP MMU...\n");
2401                 exmap_flush();
2402                 dsp_mmu_init();
2403                  */
2404         }
2405
2406 #ifdef CONFIG_ARCH_OMAP2
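        /*
         * Acknowledge the fault: write the status bits back to IRQSTATUS
         * (presumably write-one-to-clear) with the MMU briefly disabled
         * around it.
         */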
2407         dsp_mmu_disable();
2408         dsp_mmu_write_reg(status, DSP_MMU_IRQSTATUS);
2409         dsp_mmu_enable();
2410 #endif
2411
2412         enable_irq(INT_DSP_MMU);
2413 }
2414
2415 static DECLARE_WORK(mmu_int_work, (void (*)(void *))do_mmu_int, NULL);
2416
2417 /*
2418  * DSP MMU interrupt handler
2419  */
2420
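/*
 * The hard interrupt handler only masks the IRQ and defers the real work
 * to do_mmu_int() via the workqueue; do_mmu_int() re-enables the IRQ when
 * it has finished.
 */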
2421 static irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id,
2422                                      struct pt_regs *regs)
2423 {
2424         disable_irq(INT_DSP_MMU);
2425         schedule_work(&mmu_int_work);
2426         return IRQ_HANDLED;
2427 }
2428
2429 /*
2430  * dsp_mem file operation table
2431  */
2432 struct file_operations dsp_mem_fops = {
2433         .owner   = THIS_MODULE,
2434         .llseek  = dsp_mem_lseek,
2435         .read    = dsp_mem_read,
2436         .write   = dsp_mem_write,
2437         .ioctl   = dsp_mem_ioctl,
2438         .mmap    = dsp_mem_mmap,
2439         .open    = dsp_mem_open,
2440 };
2441
2442 void dsp_mem_start(void)
2443 {
2444 #ifdef CONFIG_ARCH_OMAP1
2445         dsp_register_mem_cb(intmem_enable, intmem_disable);
2446 #endif
2447 }
2448
2449 void dsp_mem_stop(void)
2450 {
2451         memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
2452 #ifdef CONFIG_ARCH_OMAP1
2453         dsp_unregister_mem_cb();
2454 #endif
2455 }
2456
2457 static char devid_mmu;
2458
2459 int __init dsp_mem_init(void)
2460 {
2461         int i;
2462         int ret = 0;
2463 #ifdef CONFIG_ARCH_OMAP2
2464         int dspmem_pg_count;
2465
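        /*
         * Program one IPI entry per 4KB page of DSP memory (dspmem_size is
         * divided by 4096 here), marking each entry with 16-bit element
         * size, then enable the IPI and set the I/O map base value.
         */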
2466         dspmem_pg_count = dspmem_size >> 12;
2467         for (i = 0; i < dspmem_pg_count; i++) {
2468                 dsp_ipi_write_reg(i, DSP_IPI_INDEX);
2469                 dsp_ipi_write_reg(DSP_IPI_ENTRY_ELMSIZEVALUE_16, DSP_IPI_ENTRY);
2470         }
2471         dsp_ipi_write_reg(1, DSP_IPI_ENABLE);
2472
2473         dsp_ipi_write_reg(IOMAP_VAL, DSP_IPI_IOMAP);
2474 #endif
2475
2476         for (i = 0; i < DSP_MMU_TLB_LINES; i++)
2477                 exmap_tbl[i].valid = 0;
2478
2479         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
2480         if (dspvect_page == NULL) {
2481                 printk(KERN_ERR
2482                        "omapdsp: failed to allocate memory "
2483                        "for dsp vector table\n");
2484                 return -ENOMEM;
2485         }
2486         dsp_mmu_init();
2487 #ifdef CONFIG_ARCH_OMAP1
2488         dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);
2489 #endif
2490
2491         /*
2492          * DSP MMU interrupt setup
2493          */
2494         ret = request_irq(INT_DSP_MMU, dsp_mmu_interrupt, SA_INTERRUPT, "dsp",
2495                           &devid_mmu);
2496         if (ret) {
2497                 printk(KERN_ERR
2498                        "failed to register DSP MMU interrupt: %d\n", ret);
2499                 goto fail;
2500         }
2501
2502         /* MMU interrupt is not enabled until DSP runs */
2503         disable_irq(INT_DSP_MMU);
2504
2505         device_create_file(&dsp_device.dev, &dev_attr_mmu);
2506         device_create_file(&dsp_device.dev, &dev_attr_exmap);
2507         device_create_file(&dsp_device.dev, &dev_attr_mempool);
2508
2509         return 0;
2510
2511 fail:
2512 #ifdef CONFIG_ARCH_OMAP1
2513         dsp_reset_idle_boot_base();
2514 #endif
2515         dsp_mmu_shutdown();
2516         free_page((unsigned long)dspvect_page);
2517         dspvect_page = NULL;
2518         return ret;
2519 }
2520
2521 void dsp_mem_exit(void)
2522 {
2523         free_irq(INT_DSP_MMU, &devid_mmu);
2524
2525         /* recover disable_depth */
2526         enable_irq(INT_DSP_MMU);
2527
2528 #ifdef CONFIG_ARCH_OMAP1
2529         dsp_reset_idle_boot_base();
2530 #endif
2531         dsp_mmu_shutdown();
2532         dsp_kmem_release();
2533
2534         if (dspvect_page != NULL) {
2535                 free_page((unsigned long)dspvect_page);
2536                 dspvect_page = NULL;
2537         }
2538
2539         device_remove_file(&dsp_device.dev, &dev_attr_mmu);
2540         device_remove_file(&dsp_device.dev, &dev_attr_exmap);
2541         device_remove_file(&dsp_device.dev, &dev_attr_mempool);
2542 }