]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - arch/arm/plat-omap/dsp/dsp_mem.c
fbce6eb9694c89a893125820db8ee5c761fb7b8c
[linux-2.6-omap-h63xx.git] / arch / arm / plat-omap / dsp / dsp_mem.c
1 /*
2  * linux/arch/arm/mach-omap/dsp/dsp_mem.c
3  *
4  * OMAP DSP memory driver
5  *
6  * Copyright (C) 2002-2005 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  *
24  * Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
25  * 2005/06/09:  DSP Gateway version 3.3
26  */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bootmem.h>
33 #include <linux/fb.h>
34 #include <linux/interrupt.h>
35 #include <linux/delay.h>
36 #include <linux/platform_device.h>
37 #include <asm/uaccess.h>
38 #include <asm/io.h>
39 #include <asm/ioctls.h>
40 #include <asm/irq.h>
41 #include <asm/pgalloc.h>
42 #include <asm/pgtable.h>
43 #include <asm/hardware/clock.h>
44 #include <asm/arch/tc.h>
45 #include <asm/arch/dsp.h>
46 #include <asm/arch/dsp_common.h>
47 #include "uaccess_dsp.h"
48 #include "dsp.h"
49
/* block-size constants used for DSP MMU mappings */
#define SZ_1MB  0x100000
#define SZ_64KB 0x10000
#define SZ_4KB  0x1000
#define SZ_1KB  0x400
/* true if adr is a multiple of align (align must be a power of two) */
#define is_aligned(adr,align)   (!((adr)&((align)-1)))
/* page-allocation orders for the corresponding block sizes */
#define ORDER_1MB       (20 - PAGE_SHIFT)
#define ORDER_64KB      (16 - PAGE_SHIFT)
#define ORDER_4KB       (12 - PAGE_SHIFT)

#define PGDIR_MASK              (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(addr)       (((addr)+PGDIR_SIZE-1)&(PGDIR_MASK))

/* enable DSP MMU translation (also asserts software reset bit) */
#define dsp_mmu_enable() \
        do { \
                omap_writew(DSPMMU_CNTL_MMU_EN | DSPMMU_CNTL_RESET_SW, \
                            DSPMMU_CNTL); \
        } while(0)
/* disable DSP MMU translation entirely */
#define dsp_mmu_disable() \
        do { omap_writew(0, DSPMMU_CNTL); } while(0)
/* flush the single TLB entry currently selected by CAM registers */
#define dsp_mmu_flush() \
        do { \
                omap_writew(DSPMMU_FLUSH_ENTRY_FLUSH_ENTRY, \
                            DSPMMU_FLUSH_ENTRY); \
        } while(0)
/* global flush: invalidate all non-locked TLB entries */
#define __dsp_mmu_gflush() \
        do { omap_writew(DSPMMU_GFLUSH_GFLUSH, DSPMMU_GFLUSH); } while(0)
/* acknowledge a DSP MMU interrupt */
#define __dsp_mmu_itack() \
        do { omap_writew(DSPMMU_IT_ACK_IT_ACK, DSPMMU_IT_ACK); } while(0)

/* EMIF (external memory interface) arbitration priority fields */
#define EMIF_PRIO_LB_MASK       0x0000f000
#define EMIF_PRIO_LB_SHIFT      12
#define EMIF_PRIO_DMA_MASK      0x00000f00
#define EMIF_PRIO_DMA_SHIFT     8
#define EMIF_PRIO_DSP_MASK      0x00000070
#define EMIF_PRIO_DSP_SHIFT     4
#define EMIF_PRIO_MPU_MASK      0x00000007
#define EMIF_PRIO_MPU_SHIFT     0
/* read-modify-write the DMA priority field of OMAP_TC_OCPT1_PRIOR */
#define set_emiff_dma_prio(prio) \
        do { \
                omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
        ~EMIF_PRIO_DMA_MASK) | \
                            ((prio) << EMIF_PRIO_DMA_SHIFT), \
                            OMAP_TC_OCPT1_PRIOR); \
        } while(0)
94
/* what kind of memory backs an exmap entry */
enum exmap_type {
        EXMAP_TYPE_MEM,         /* pages allocated by dsp_exmap() itself */
        EXMAP_TYPE_FB           /* externally-owned memory (frame buffer) */
};

/* software-side record, one per DSP MMU TLB line */
struct exmap_tbl {
        unsigned int valid:1;
        unsigned int cntnu:1;   /* grouping */
        int usecount;           /* reference count by mmap */
        enum exmap_type type;
        void *buf;              /* virtual address of the buffer,
                                 * i.e. 0xc0000000 - */
        void *vadr;             /* DSP shadow space,
                                 * i.e. 0xe0000000 - 0xe0ffffff */
        unsigned int order;     /* page order of this mapping unit */
};
#define DSPMMU_TLB_LINES        32
static struct exmap_tbl exmap_tbl[DSPMMU_TLB_LINES];
static DECLARE_RWSEM(exmap_sem);        /* protects exmap_tbl[] */

static int dsp_exunmap(unsigned long dspadr);

static void *dspvect_page;
/* NOTE(review): presumably recorded by the MMU fault handler — the
 * handler is not visible in this part of the file, confirm before use */
static unsigned long dsp_fault_adr;
static struct mem_sync_struct mem_sync;
120
/*
 * lineup_offset(): produce the smallest address >= adr whose low bits
 * (those selected by mask) equal the corresponding low bits of ref.
 */
static __inline__ unsigned long lineup_offset(unsigned long adr,
                                              unsigned long ref,
                                              unsigned long mask)
{
        unsigned long lined = (adr & ~mask) | (ref & mask);

        /* substituting the low bits may have moved us below adr;
         * if so, advance by one full period of the mask */
        return (lined < adr) ? lined + mask + 1 : lined;
}
132
133 void dsp_mem_sync_inc(void)
134 {
135         /*
136          * FIXME: dsp_mem_enable()!!!
137          */
138         if (mem_sync.DARAM)
139                 mem_sync.DARAM->ad_arm++;
140         if (mem_sync.SARAM)
141                 mem_sync.SARAM->ad_arm++;
142         if (mem_sync.SDRAM)
143                 mem_sync.SDRAM->ad_arm++;
144 }
145
146 /*
147  * dsp_mem_sync_config() is called from mbx1 workqueue
148  */
/*
 * dsp_mem_sync_config(): install a new set of memory-sync descriptors
 * after validating that each pointer really lives in the memory region
 * its name claims (DARAM/SARAM/external).  Returns 0 on success, -1 on
 * validation failure.  Called from the mbx1 workqueue.
 */
int dsp_mem_sync_config(struct mem_sync_struct *sync)
{
        size_t sync_seq_sz = sizeof(struct sync_seq);

#ifdef OLD_BINARY_SUPPORT
        /* NULL means "tear down": old binaries clear the config */
        if (sync == NULL) {
                memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
                return 0;
        }
#endif
        /* each descriptor must fully reside in its expected memory type */
        if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
            (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
            (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
                printk(KERN_ERR
                       "omapdsp: mem_sync address validation failure!\n"
                       "  mem_sync.DARAM = 0x%p,\n"
                       "  mem_sync.SARAM = 0x%p,\n"
                       "  mem_sync.SDRAM = 0x%p,\n",
                       sync->DARAM, sync->SARAM, sync->SDRAM);
                return -1;
        }
        memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
        return 0;
}
173
174 /*
175  * kmem_reserve(), kmem_release():
176  * reserve or release kernel memory for exmap().
177  *
178  * exmap() might request consecutive 1MB or 64kB,
179  * but it will be difficult after memory pages are fragmented.
180  * So, user can reserve such memory blocks in the early phase
181  * through kmem_reserve().
182  */
/*
 * kmem_pool: a small fixed-size stash of pre-allocated page blocks.
 * buf[] slots hold page addresses; a zero slot is empty.  'count'
 * tracks how many slots have been filled by dsp_kmem_reserve()
 * (it is not decremented when a block is taken — confirm against
 * dsp_mem_get_dma_pages(), which only zeroes the slot).
 */
struct kmem_pool {
        struct semaphore sem;   /* serializes all access to buf[]/count */
        unsigned long buf[16];
        int count;
};

#define KMEM_POOL_INIT(name) \
{ \
        .sem = __SEMAPHORE_INIT((name).sem, 1), \
}
#define DECLARE_KMEM_POOL(name) \
        struct kmem_pool name = KMEM_POOL_INIT(name)

/* reservation pools for 1MB and 64kB blocks */
DECLARE_KMEM_POOL(kmem_pool_1M);
DECLARE_KMEM_POOL(kmem_pool_64K);
198
199 static void dsp_kmem_release(void)
200 {
201         int i;
202
203         down(&kmem_pool_1M.sem);
204         for (i = 0; i < kmem_pool_1M.count; i++) {
205                 if (kmem_pool_1M.buf[i])
206                         free_pages(kmem_pool_1M.buf[i], ORDER_1MB);
207         }
208         kmem_pool_1M.count = 0;
209         up(&kmem_pool_1M.sem);
210
211         down(&kmem_pool_64K.sem);
212         for (i = 0; i < kmem_pool_64K.count; i++) {
213                 if (kmem_pool_64K.buf[i])
214                         free_pages(kmem_pool_64K.buf[i], ORDER_64KB);
215         }
216         kmem_pool_64K.count = 0;
217         up(&kmem_pool_1M.sem);
218 }
219
220 static int dsp_kmem_reserve(unsigned long size)
221 {
222         unsigned long buf;
223         unsigned int order;
224         unsigned long unit;
225         unsigned long _size;
226         struct kmem_pool *pool;
227         int i;
228
229         /* alignment check */
230         if (!is_aligned(size, SZ_64KB)) {
231                 printk(KERN_ERR
232                        "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
233                 return -EINVAL;
234         }
235         if (size > DSPSPACE_SIZE) {
236                 printk(KERN_ERR
237                        "omapdsp: size(0x%lx) is larger than DSP memory space "
238                        "size (0x%x.\n", size, DSPSPACE_SIZE);
239                 return -EINVAL;
240         }
241
242         for (_size = size; _size; _size -= unit) {
243                 if (_size >= SZ_1MB) {
244                         unit = SZ_1MB;
245                         order = ORDER_1MB;
246                         pool = &kmem_pool_1M;
247                 } else {
248                         unit = SZ_64KB;
249                         order = ORDER_64KB;
250                         pool = &kmem_pool_64K;
251                 }
252
253                 buf = __get_dma_pages(GFP_KERNEL, order);
254                 if (!buf)
255                         return size - _size;
256                 down(&pool->sem);
257                 for (i = 0; i < 16; i++) {
258                         if (!pool->buf[i]) {
259                                 pool->buf[i] = buf;
260                                 pool->count++;
261                                 buf = 0;
262                                 break;
263                         }
264                 }
265                 up(&pool->sem);
266
267                 if (buf) {      /* pool is full */
268                         free_pages(buf, order);
269                         return size - _size;
270                 }
271         }
272
273         return size;
274 }
275
276 static unsigned long dsp_mem_get_dma_pages(unsigned int order)
277 {
278         struct kmem_pool *pool;
279         unsigned long buf = 0;
280         int i;
281
282         switch (order) {
283                 case ORDER_1MB:
284                         pool = &kmem_pool_1M;
285                         break;
286                 case ORDER_64KB:
287                         pool = &kmem_pool_64K;
288                         break;
289                 default:
290                         pool = NULL;
291         }
292
293         if (pool) {
294                 down(&pool->sem);
295                 for (i = 0; i < pool->count; i++) {
296                         if (pool->buf[i]) {
297                                 buf = pool->buf[i];
298                                 pool->buf[i] = 0;
299                                 break;
300                         }
301                 }
302                 up(&pool->sem);
303                 if (buf)
304                         return buf;
305         }
306
307         /* other size or not found in pool */
308         return __get_dma_pages(GFP_KERNEL, order);
309 }
310
311 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
312 {
313         struct kmem_pool *pool;
314         struct page *page, *ps, *pe;
315         int i;
316
317         ps = virt_to_page(buf);
318         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
319         for (page = ps; page < pe; page++) {
320                 ClearPageReserved(page);
321         }
322
323         /*
324          * return buffer to kmem_pool or paging system
325          */
326         switch (order) {
327                 case ORDER_1MB:
328                         pool = &kmem_pool_1M;
329                         break;
330                 case ORDER_64KB:
331                         pool = &kmem_pool_64K;
332                         break;
333                 default:
334                         pool = NULL;
335         }
336
337         if (pool) {
338                 down(&pool->sem);
339                 for (i = 0; i < pool->count; i++) {
340                         if (!pool->buf[i]) {
341                                 pool->buf[i] = buf;
342                                 buf = 0;
343                         }
344                 }
345                 up(&pool->sem);
346         }
347
348         /* other size or pool is filled */
349         if (buf)
350                 free_pages(buf, order);
351 }
352
353 /*
354  * ARM MMU operations
355  */
/*
 * exmap_set_armmmu(): map [virt, virt+size) -> [phys, phys+size) in
 * the kernel (init_mm) page tables, one small-page PTE per 4kB page.
 * Returns 0 on success, -ENOMEM if a PTE page cannot be allocated.
 *
 * NOTE(review): pmdp is computed once before the loop, so this assumes
 * the whole range lies under a single PMD — confirm callers never map
 * across a PGDIR boundary.  prot_pmd is computed but never applied
 * (pmd_populate_kernel sets its own bits) — verify it is intentional.
 */
static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
                            unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        printk(KERN_DEBUG
               "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set  */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        /* constant virt->phys displacement for this range */
        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte(ptep, __pte((virt + off) | prot_pte));
        }
        /* size is page-aligned by the callers; leftovers indicate a bug */
        if (sz_left)
                BUG();

        return 0;
}
393
394 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
395 {
396         unsigned long sz_left;
397         pmd_t *pmdp;
398         pte_t *ptep;
399
400         printk(KERN_DEBUG
401                "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
402                virt, size);
403
404         for (sz_left = size;
405              sz_left >= PAGE_SIZE;
406              sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
407                 pmdp = pmd_offset(pgd_offset_k(virt), virt);
408                 ptep = pte_offset_kernel(pmdp, virt);
409                 pte_clear(&init_mm, virt, ptep);
410         }
411         if (sz_left)
412                 BUG();
413 }
414
/*
 * exmap_valid(): return 1 if [vadr, vadr+len) is completely covered by
 * valid exmap_tbl entries, 0 otherwise.  A range may span several
 * discontiguous entries: when an entry covers only a prefix, the scan
 * restarts from the top for the uncovered remainder.
 */
static int exmap_valid(void *vadr, size_t len)
{
        /* exmap_sem should be held before calling this function */
        int i;

start:
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                void *mapadr;
                unsigned long mapsize;
                struct exmap_tbl *ent = &exmap_tbl[i];

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        if (vadr + len <= mapadr + mapsize) {
                                /* this map covers whole address. */
                                return 1;
                        } else {
                                /*
                                 * this map covers partially.
                                 * check rest portion.
                                 */
                                len -= mapadr + mapsize - vadr;
                                vadr = mapadr + mapsize;
                                goto start;
                        }
                }
        }

        return 0;
}
448
449 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
450 {
451         void *ds = (void *)daram_base;
452         void *de = (void *)daram_base + daram_size;
453         void *ss = (void *)saram_base;
454         void *se = (void *)saram_base + saram_size;
455         int ret;
456
457         if ((vadr >= ds) && (vadr < de)) {
458                 if (vadr + len > de)
459                         return MEM_TYPE_CROSSING;
460                 else
461                         return MEM_TYPE_DARAM;
462         } else if ((vadr >= ss) && (vadr < se)) {
463                 if (vadr + len > se)
464                         return MEM_TYPE_CROSSING;
465                 else
466                         return MEM_TYPE_SARAM;
467         } else {
468                 down_read(&exmap_sem);
469                 if (exmap_valid(vadr, len))
470                         ret = MEM_TYPE_EXTERN;
471                 else
472                         ret = MEM_TYPE_NONE;
473                 up_read(&exmap_sem);
474                 return ret;
475         }
476 }
477
478 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
479 {
480         if (dsp_mem_type(p, len) <= 0) {
481                 if (fmt != NULL) {
482                         char s[64];
483                         va_list args;
484
485                         va_start(args, fmt);
486                         vsprintf(s, fmt, args);
487                         va_end(args);
488                         printk(KERN_ERR
489                                "omapdsp: %s address(0x%p) and size(0x%x) is "
490                                "not valid!\n"
491                                "         (crossing different type of memories, or \n"
492                                "          external memory space where no "
493                                "actual memory is mapped)\n",
494                                s, p, len);
495                 }
496                 return -1;
497         }
498
499         return 0;
500 }
501
502 /*
503  * exmap_use(), unuse(): 
504  * when the mapped area is exported to user space with mmap,
505  * the usecount is incremented.
506  * while the usecount > 0, that area can't be released.
507  */
508 void exmap_use(void *vadr, size_t len)
509 {
510         int i;
511
512         down_write(&exmap_sem);
513         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
514                 void *mapadr;
515                 unsigned long mapsize;
516                 struct exmap_tbl *ent = &exmap_tbl[i];
517
518                 if (!ent->valid)
519                         continue;
520                 mapadr = (void *)ent->vadr;
521                 mapsize = 1 << (ent->order + PAGE_SHIFT);
522                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
523                         ent->usecount++;
524                 }
525         }
526         up_write(&exmap_sem);
527 }
528
529 void exmap_unuse(void *vadr, size_t len)
530 {
531         int i;
532
533         down_write(&exmap_sem);
534         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
535                 void *mapadr;
536                 unsigned long mapsize;
537                 struct exmap_tbl *ent = &exmap_tbl[i];
538
539                 if (!ent->valid)
540                         continue;
541                 mapadr = (void *)ent->vadr;
542                 mapsize = 1 << (ent->order + PAGE_SHIFT);
543                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
544                         ent->usecount--;
545                 }
546         }
547         up_write(&exmap_sem);
548 }
549
550 /*
551  * dsp_virt_to_phys()
552  * returns physical address, and sets len to valid length
553  */
/*
 * dsp_virt_to_phys()
 * returns physical address, and sets len to valid length
 *
 * Internal DSP memory is identity-mapped; external memory is resolved
 * through exmap_tbl.  Returns 0 when no mapping covers vadr.
 * NOTE(review): exmap_tbl is scanned without taking exmap_sem —
 * confirm callers serialize against dsp_exmap()/dsp_exunmap().
 */
unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
{
        int i;

        if (is_dsp_internal_mem(vadr)) {
                /* DSRAM or SARAM */
                *len = dspmem_base + dspmem_size - (unsigned long)vadr;
                return (unsigned long)vadr;
        }

        /* EXRAM */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                void *mapadr;
                unsigned long mapsize;
                struct exmap_tbl *ent = &exmap_tbl[i];

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        /* offset into the entry's backing buffer */
                        *len = mapadr + mapsize - vadr;
                        return __pa(ent->buf) + vadr - mapadr;
                }
        }

        /* valid mapping not found */
        return 0;
}
583
584 /*
585  * DSP MMU operations
586  */
587 static __inline__ unsigned short get_cam_l_va_mask(unsigned short slst)
588 {
589         switch (slst) {
590         case DSPMMU_CAM_L_SLST_1MB:
591                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
592                        DSPMMU_CAM_L_VA_TAG_L2_MASK_1MB;
593         case DSPMMU_CAM_L_SLST_64KB:
594                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
595                        DSPMMU_CAM_L_VA_TAG_L2_MASK_64KB;
596         case DSPMMU_CAM_L_SLST_4KB:
597                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
598                        DSPMMU_CAM_L_VA_TAG_L2_MASK_4KB;
599         case DSPMMU_CAM_L_SLST_1KB:
600                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
601                        DSPMMU_CAM_L_VA_TAG_L2_MASK_1KB;
602         }
603         return 0;
604 }
605
606 static __inline__ void get_tlb_lock(int *base, int *victim)
607 {
608         unsigned short lock = omap_readw(DSPMMU_LOCK);
609         if (base != NULL)
610                 *base = (lock & DSPMMU_LOCK_BASE_MASK)
611                         >> DSPMMU_LOCK_BASE_SHIFT;
612         if (victim != NULL)
613                 *victim = (lock & DSPMMU_LOCK_VICTIM_MASK)
614                           >> DSPMMU_LOCK_VICTIM_SHIFT;
615 }
616
617 static __inline__ void set_tlb_lock(int base, int victim)
618 {
619         omap_writew((base   << DSPMMU_LOCK_BASE_SHIFT) |
620                     (victim << DSPMMU_LOCK_VICTIM_SHIFT), DSPMMU_LOCK);
621 }
622
/*
 * __read_tlb(): read back one TLB entry.  The entry index is selected
 * by writing the lock register (victim field), then an LD_TLB read
 * command latches the entry into the READ_CAM/READ_RAM registers.
 * Any of the four output pointers may be NULL if that half of the
 * entry is not wanted.  Register access order is significant.
 */
static __inline__ void __read_tlb(unsigned short lbase, unsigned short victim,
                                  unsigned short *cam_h, unsigned short *cam_l,
                                  unsigned short *ram_h, unsigned short *ram_l)
{
        /* set victim */
        set_tlb_lock(lbase, victim);

        /* read a TLB entry */
        omap_writew(DSPMMU_LD_TLB_RD, DSPMMU_LD_TLB);

        if (cam_h != NULL)
                *cam_h = omap_readw(DSPMMU_READ_CAM_H);
        if (cam_l != NULL)
                *cam_l = omap_readw(DSPMMU_READ_CAM_L);
        if (ram_h != NULL)
                *ram_h = omap_readw(DSPMMU_READ_RAM_H);
        if (ram_l != NULL)
                *ram_l = omap_readw(DSPMMU_READ_RAM_L);
}
642
/*
 * __load_tlb(): program the CAM/RAM staging registers with a new
 * translation, flush any stale entry matching that address, then
 * latch the entry into the TLB line previously selected with
 * set_tlb_lock().  Register access order is significant.
 */
static __inline__ void __load_tlb(unsigned short cam_h, unsigned short cam_l,
                                  unsigned short ram_h, unsigned short ram_l)
{
        omap_writew(cam_h, DSPMMU_CAM_H);
        omap_writew(cam_l, DSPMMU_CAM_L);
        omap_writew(ram_h, DSPMMU_RAM_H);
        omap_writew(ram_l, DSPMMU_RAM_L);

        /* flush the entry */
        dsp_mmu_flush();

        /* load a TLB entry */
        omap_writew(DSPMMU_LD_TLB_LD, DSPMMU_LD_TLB);
}
657
658 static int dsp_mmu_load_tlb(unsigned long vadr, unsigned long padr,
659                             unsigned short slst, unsigned short prsvd,
660                             unsigned short ap)
661 {
662         int lbase, victim;
663         unsigned short cam_l_va_mask;
664
665         clk_use(dsp_ck_handle);
666
667         get_tlb_lock(&lbase, NULL);
668         for (victim = 0; victim < lbase; victim++) {
669                 unsigned short cam_l;
670
671                 /* read a TLB entry */
672                 __read_tlb(lbase, victim, NULL, &cam_l, NULL, NULL);
673                 if (!(cam_l & DSPMMU_CAM_L_V))
674                         goto found_victim;
675         }
676         set_tlb_lock(lbase, victim);
677
678 found_victim:
679         /* The last (31st) entry cannot be locked? */
680         if (victim == 31) {
681                 printk(KERN_ERR "omapdsp: TLB is full.\n");
682                 return -EBUSY;
683         }
684
685         cam_l_va_mask = get_cam_l_va_mask(slst);
686         if (vadr &
687             ~(DSPMMU_CAM_H_VA_TAG_H_MASK << 22 |
688               (unsigned long)cam_l_va_mask << 6)) {
689                 printk(KERN_ERR
690                        "omapdsp: mapping vadr (0x%06lx) is not "
691                        "aligned boundary\n", vadr);
692                 return -EINVAL;
693         }
694
695         __load_tlb(vadr >> 22, (vadr >> 6 & cam_l_va_mask) | prsvd | slst,
696                    padr >> 16, (padr & DSPMMU_RAM_L_RAM_LSB_MASK) | ap);
697
698         /* update lock base */
699         if (victim == lbase)
700                 lbase++;
701         set_tlb_lock(lbase, lbase);
702
703         clk_unuse(dsp_ck_handle);
704         return 0;
705 }
706
/*
 * dsp_mmu_clear_tlb(): walk the locked TLB entries, flush any entry
 * whose decoded DSP virtual address equals vadr, and shrink the lock
 * range to just above the highest surviving valid entry.
 * Always returns 0.
 */
static int dsp_mmu_clear_tlb(unsigned long vadr)
{
        int lbase;
        int i;
        int max_valid = 0;

        clk_use(dsp_ck_handle);

        get_tlb_lock(&lbase, NULL);
        for (i = 0; i < lbase; i++) {
                unsigned short cam_h, cam_l;
                unsigned short cam_l_va_mask, cam_vld, slst;
                unsigned long cam_va;

                /* read a TLB entry */
                __read_tlb(lbase, i, &cam_h, &cam_l, NULL, NULL);

                cam_vld = cam_l & DSPMMU_CAM_L_V;
                if (!cam_vld)
                        continue;

                /* reconstruct the entry's virtual address from the CAM
                 * tag fields, using the mask for its page-size code */
                slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
                cam_l_va_mask = get_cam_l_va_mask(slst);
                cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (unsigned long)(cam_l & cam_l_va_mask) << 6;

                if (cam_va == vadr)
                        /* flush the entry */
                        dsp_mmu_flush();
                else
                        max_valid = i;
        }

        /* set new lock base */
        set_tlb_lock(max_valid+1, max_valid+1);

        clk_unuse(dsp_ck_handle);
        return 0;
}
746
747 static void dsp_mmu_gflush(void)
748 {
749         clk_use(dsp_ck_handle);
750
751         __dsp_mmu_gflush();
752         set_tlb_lock(1, 1);
753
754         clk_unuse(dsp_ck_handle);
755 }
756
757 /*
758  * dsp_exmap()
759  *
760  * OMAP_DSP_MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
761  * In this case, the buffer for DSP is allocated in this routine,
762  * then it is mapped.
763  * On the other hand, for example - frame buffer sharing, calls
764  * this function with padr set. It means some known address space
765  * pointed with padr is going to be shared with DSP.
766  */
static int dsp_exmap(unsigned long dspadr, unsigned long padr,
                     unsigned long size, enum exmap_type type)
{
        unsigned short slst;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;             /* size mapped by one TLB entry */
        unsigned int cntnu = 0;         /* set for 2nd+ chunks of a group */
        unsigned long _dspadr = dspadr; /* cursors over the whole request */
        unsigned long _padr = padr;
        void *_vadr = dspbyte_to_virt(dspadr);
        unsigned long _size = size;
        struct exmap_tbl *exmap_ent;
        int status;
        int i;

#define MINIMUM_PAGESZ  SZ_4KB
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
                return -EINVAL;
        }
        if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: DSP address(0x%lx) is not aligned.\n", dspadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: physical address(0x%lx) is not aligned.\n",
                       padr);
                return -EINVAL;
        }

        /* address validity check: must lie in external DSP space and
         * must not touch the DSP init page */
        if ((dspadr < dspmem_size) ||
            (dspadr >= DSPSPACE_SIZE) ||
            ((dspadr + size > DSP_INIT_PAGE) &&
             (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
                printk(KERN_ERR
                       "omapdsp: illegal address/size for dsp_exmap().\n");
                return -EINVAL;
        }

        down_write(&exmap_sem);

        /* overlap check */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                unsigned long mapsize;
                struct exmap_tbl *tmp_ent = &exmap_tbl[i];

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        printk(KERN_ERR "omapdsp: exmap page overlap!\n");
                        up_write(&exmap_sem);
                        return -EINVAL;
                }
        }

start:
        /* map one chunk per iteration; loop until _size is exhausted */
        buf = NULL;
        /* Are there any free TLB lines?  */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                if (!exmap_tbl[i].valid)
                        goto found_free;
        }
        printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = &exmap_tbl[i];

        /* pick the largest page size allowed by the remaining size and
         * the alignment of both the DSP and physical cursors */
        if ((_size >= SZ_1MB) &&
            (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
            is_aligned(_dspadr, SZ_1MB)) {
                unit = SZ_1MB;
                slst = DSPMMU_CAM_L_SLST_1MB;
                order = ORDER_1MB;
        } else if ((_size >= SZ_64KB) &&
                   (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64KB)) {
                unit = SZ_64KB;
                slst = DSPMMU_CAM_L_SLST_64KB;
                order = ORDER_64KB;
        } else /* if (_size >= SZ_4KB) */ {
                unit = SZ_4KB;
                slst = DSPMMU_CAM_L_SLST_4KB;
                order = ORDER_4KB;
        }
#if 0   /* 1KB is not enabled */
        else if (_size >= SZ_1KB) {
                unit = SZ_1KB;
                slst = DSPMMU_CAM_L_SLST_1KB;
                order = XXX;
        }
#endif

        /* buffer allocation: only when we own the memory (padr == 0 path) */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                buf = (void *)dsp_mem_get_dma_pages(order);
                if (buf == NULL) {
                        status = -ENOMEM;
                        goto fail;
                }
                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);
                for (page = ps; page < pe; page++) {
                        SetPageReserved(page);
                }
                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we should not access to the allocated memory through 'buf'
         * since this area should not be cashed.
         */
        status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading DSP TLB entry */
        status = dsp_mmu_load_tlb(_dspadr, _padr, slst, 0, DSPMMU_RAM_L_AP_FA);
        if (status < 0) {
                /* roll back the ARM-side mapping made just above */
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
        }

        /* record the chunk; only now does it become visible as valid */
        exmap_ent->buf      = buf;
        exmap_ent->vadr     = _vadr;
        exmap_ent->order    = order;
        exmap_ent->valid    = 1;
        exmap_ent->cntnu    = cntnu;
        exmap_ent->type     = type;
        exmap_ent->usecount = 0;

        if ((_size -= unit) == 0) {     /* normal completion */
                up_write(&exmap_sem);
                return size;
        }

        /* more to map: advance cursors; later chunks are continuations */
        _dspadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        cntnu = 1;
        goto start;

fail:
        /* free the not-yet-recorded buffer, then unmap any chunks that
         * were already recorded for this request */
        up_write(&exmap_sem);
        if (buf)
                dsp_mem_free_pages((unsigned long)buf, order);
        dsp_exunmap(dspadr);
        return status;
}
931
932 static unsigned long unmap_free_arm(struct exmap_tbl *ent)
933 {
934         unsigned long size;
935
936         /* clearing ARM MMU */
937         size = 1 << (ent->order + PAGE_SHIFT);
938         exmap_clear_armmmu((unsigned long)ent->vadr, size);
939
940         /* freeing allocated memory */
941         if (ent->type == EXMAP_TYPE_MEM) {
942                 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
943                 printk(KERN_DEBUG
944                        "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
945                        size, ent->buf);
946         }
947
948         return size;
949 }
950
/*
 * dsp_exunmap(): remove the exmap mapping group starting at 'dspadr'.
 * A large dsp_exmap() request is stored as several consecutive TLB
 * entries chained via the 'cntnu' flag; this walks the chain and tears
 * each entry down.  Returns the total number of bytes unmapped, or
 * -EINVAL when the address is unknown, still referenced, or the chain
 * is inconsistent.
 */
static int dsp_exunmap(unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = dspbyte_to_virt(dspadr);
	down_write(&exmap_sem);
	/* locate the exmap entry whose ARM virtual address matches */
	for (idx = 0; idx < DSPMMU_TLB_LINES; idx++) {
		ent = &exmap_tbl[idx];
		if (!ent->valid)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&exmap_sem);
	printk(KERN_WARNING
	       "omapdsp: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	/* refuse to unmap while someone still holds a reference */
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "omapdsp: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&exmap_sem);
		return -EINVAL;
	}
	/* clearing DSP TLB entry */
	dsp_mmu_clear_tlb(dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	ent->valid = 0;
	total += size;

	/* we don't free PTEs */

	/* flush TLB */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	if (++idx == DSPMMU_TLB_LINES)
		goto up_out;    /* normal completion */
	ent = &exmap_tbl[idx];
	if (!ent->valid || !ent->cntnu)
		goto up_out;    /* normal completion */

	/* chain continues: advance both addresses and unmap the next entry */
	dspadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map; /* continue */

	printk(KERN_ERR
	       "omapdsp: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&exmap_sem);
	return -EINVAL;

up_out:
	up_write(&exmap_sem);
	return total;
}
1018
1019 static void exmap_flush(void)
1020 {
1021         struct exmap_tbl *ent;
1022         int i;
1023
1024         down_write(&exmap_sem);
1025
1026         /* clearing DSP TLB entry */
1027         dsp_mmu_gflush();
1028
1029         /* exmap_tbl[0] should be preserved */
1030         for (i = 1; i < DSPMMU_TLB_LINES; i++) {
1031                 ent = &exmap_tbl[i];
1032                 if (ent->valid) {
1033                         unmap_free_arm(ent);
1034                         ent->valid = 0;
1035                 }
1036         }
1037
1038         /* flush TLB */
1039         flush_tlb_kernel_range(dspmem_base + dspmem_size,
1040                                dspmem_base + DSPSPACE_SIZE);
1041         up_write(&exmap_sem);
1042 }
1043
1044 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1045 #ifndef CONFIG_FB
1046 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1047 #endif /* CONFIG_FB */
1048
1049 static int dsp_fbexport(unsigned long *dspadr)
1050 {
1051         unsigned long dspadr_actual;
1052         unsigned long padr_sys, padr, fbsz_sys, fbsz;
1053         int cnt;
1054
1055         printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1056
1057         if (num_registered_fb == 0) {
1058                 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1059                 return -EINVAL;
1060         }
1061         if (num_registered_fb != 1) {
1062                 printk(KERN_INFO
1063                        "omapdsp: %d frame buffers found. we use first one.\n",
1064                        num_registered_fb);
1065         }
1066         padr_sys = registered_fb[0]->fix.smem_start;
1067         fbsz_sys = registered_fb[0]->fix.smem_len;
1068         if (fbsz_sys == 0) {
1069                 printk(KERN_ERR
1070                        "omapdsp: framebuffer doesn't seem to be configured "
1071                        "correctly! (size=0)\n");
1072                 return -EINVAL;
1073         }
1074
1075         /*
1076          * align padr and fbsz to 4kB boundary
1077          * (should be noted to the user afterwards!)
1078          */
1079         padr = padr_sys & ~(SZ_4KB-1);
1080         fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1081
1082         /* line up dspadr offset with padr */
1083         dspadr_actual =
1084                 (fbsz > SZ_1MB) ?  lineup_offset(*dspadr, padr, SZ_1MB-1) :
1085                 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1086                 /* (fbsz > SZ_4KB) ? */ *dspadr;
1087         if (dspadr_actual != *dspadr)
1088                 printk(KERN_DEBUG
1089                        "omapdsp: actual dspadr for FBEXPORT = %08lx\n",
1090                        dspadr_actual);
1091         *dspadr = dspadr_actual;
1092
1093         cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1094         if (cnt < 0) {
1095                 printk(KERN_ERR "omapdsp: exmap failure.\n");
1096                 return cnt;
1097         }
1098
1099         if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1100                 printk(KERN_WARNING
1101 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1102 "  !!  screen base address or size is not aligned in 4kB:           !!\n"
1103 "  !!    actual screen  adr = %08lx, size = %08lx             !!\n"
1104 "  !!    exporting      adr = %08lx, size = %08lx             !!\n"
1105 "  !!  Make sure that the framebuffer is allocated with 4kB-order!  !!\n"
1106 "  !!  Otherwise DSP can corrupt the kernel memory.                 !!\n"
1107 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1108                        padr_sys, fbsz_sys, padr, fbsz);
1109         }
1110
1111         /* increase the DMA priority */
1112         set_emiff_dma_prio(15);
1113
1114         return cnt;
1115 }
1116
1117 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1118
/* FBEXPORT disabled at configure time: always refuse the request. */
static int dsp_fbexport(unsigned long *dspadr)
{
        printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
        return -EINVAL;
}
1124
1125 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1126
/*
 * dsp_mmu_itack(): acknowledge a pending DSP MMU fault interrupt.
 * Temporarily maps one page at the page-aligned fault address so the
 * faulting access can complete, drops the DSP into the recovery
 * runlevel, sends the ack, then removes the temporary mapping again.
 * Returns 0, or -EINVAL if no MMU error is actually pending.
 */
static int dsp_mmu_itack(void)
{
        unsigned long dspadr;

        printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
        if (!dsp_err_mmu_isset()) {
                printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
                return -EINVAL;
        }
        /* NOTE(review): uses SZ_4K (from asm headers) while this file
         * defines its own SZ_4KB -- presumably the same value; confirm. */
        dspadr = dsp_fault_adr & ~(SZ_4K-1);
        dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM);    /* FIXME: reserve TLB entry for this */
        printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
        dsp_runlevel(OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY);
        __dsp_mmu_itack();
        udelay(100);    /* give the ack time to take effect before unmapping */
        dsp_exunmap(dspadr);
        dsp_err_mmu_clear();
        return 0;
}
1146
/*
 * dsp_mmu_init(): (re)initialize the DSP MMU.
 * Resets the MMU, maps the DSP vector page (DSP_INIT_PAGE) on the ARM
 * side, records it as exmap_tbl[0], and loads it as a preserved,
 * fully-accessible DSP TLB entry.
 */
static void dsp_mmu_init(void)
{
        unsigned long phys;
        void *virt;

        clk_use(dsp_ck_handle);
        down_write(&exmap_sem);

        dsp_mmu_disable();      /* clear all */
        udelay(100);
        dsp_mmu_enable();

        /* mapping for ARM MMU */
        phys = __pa(dspvect_page);
        virt = dspbyte_to_virt(DSP_INIT_PAGE);  /* 0xe0fff000 */
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        exmap_tbl[0].buf      = dspvect_page;
        exmap_tbl[0].vadr     = virt;
        exmap_tbl[0].usecount = 0;
        exmap_tbl[0].order    = 0;
        exmap_tbl[0].valid    = 1;
        exmap_tbl[0].cntnu    = 0;

        /* DSP TLB initialization */
        set_tlb_lock(0, 0);
        /* preserved, full access */
        dsp_mmu_load_tlb(DSP_INIT_PAGE, phys, DSPMMU_CAM_L_SLST_4KB,
                         DSPMMU_CAM_L_P, DSPMMU_RAM_L_AP_FA);
        up_write(&exmap_sem);
        clk_unuse(dsp_ck_handle);
}
1178
/*
 * dsp_mmu_shutdown(): flush all external mappings, then disable the MMU.
 */
static void dsp_mmu_shutdown(void)
{
        exmap_flush();
        dsp_mmu_disable();      /* clear all */
}
1184
1185 /*
1186  * intmem_enable() / disable():
1187  * if the address is in DSP internal memories,
1188  * we send PM mailbox commands so that DSP DMA domain won't go in idle
1189  * when ARM is accessing to those memories.
1190  */
1191 static int intmem_enable(void)
1192 {
1193         int ret = 0;
1194
1195         if (dsp_is_ready())
1196                 ret = dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_ENABLE,
1197                                  DSPREG_ICR_DMA_IDLE_DOMAIN);
1198
1199         return ret;
1200 }
1201
1202 static void intmem_disable(void) {
1203         if (dsp_is_ready())
1204                 dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_DISABLE,
1205                            DSPREG_ICR_DMA_IDLE_DOMAIN);
1206 }
1207
1208 /*
1209  * dsp_mem_enable() / disable()
1210  */
1211 int intmem_usecount;
1212
1213 int dsp_mem_enable(void *adr)
1214 {
1215         int ret = 0;
1216
1217         if (is_dsp_internal_mem(adr)) {
1218                 if (intmem_usecount++ == 0)
1219                         ret = omap_dsp_request_mem();
1220         } else
1221                 down_read(&exmap_sem);
1222
1223         return ret;
1224 }
1225
1226 void dsp_mem_disable(void *adr)
1227 {
1228         if (is_dsp_internal_mem(adr)) {
1229                 if (--intmem_usecount == 0)
1230                         omap_dsp_release_mem();
1231         } else
1232                 up_read(&exmap_sem);
1233 }
1234
1235 /* for safety */
1236 void dsp_mem_usecount_clear(void)
1237 {
1238         if (intmem_usecount != 0) {
1239                 printk(KERN_WARNING
1240                        "omapdsp: unbalanced memory request/release detected.\n"
1241                        "         intmem_usecount is not zero at where "
1242                        "it should be! ... fixed to be zero.\n");
1243                 intmem_usecount = 0;
1244                 omap_dsp_release_mem();
1245         }
1246 }
1247
1248 /*
1249  * dsp_mem file operations
1250  */
1251 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1252 {
1253         loff_t ret;
1254
1255         down(&file->f_dentry->d_inode->i_sem);
1256         switch (orig) {
1257         case 0:
1258                 file->f_pos = offset;
1259                 ret = file->f_pos;
1260                 break;
1261         case 1:
1262                 file->f_pos += offset;
1263                 ret = file->f_pos;
1264                 break;
1265         default:
1266                 ret = -EINVAL;
1267         }
1268         up(&file->f_dentry->d_inode->i_sem);
1269         return ret;
1270 }
1271
1272 static ssize_t intmem_read(struct file *file, char *buf, size_t count,
1273                            loff_t *ppos)
1274 {
1275         unsigned long p = *ppos;
1276         void *vadr = dspbyte_to_virt(p);
1277         ssize_t size = dspmem_size;
1278         ssize_t read;
1279
1280         if (p >= size)
1281                 return 0;
1282         clk_use(api_ck_handle);
1283         read = count;
1284         if (count > size - p)
1285                 read = size - p;
1286         if (copy_to_user(buf, vadr, read)) {
1287                 read = -EFAULT;
1288                 goto out;
1289         }
1290         *ppos += read;
1291 out:
1292         clk_unuse(api_ck_handle);
1293         return read;
1294 }
1295
1296 static ssize_t exmem_read(struct file *file, char *buf, size_t count,
1297                           loff_t *ppos)
1298 {
1299         unsigned long p = *ppos;
1300         void *vadr = dspbyte_to_virt(p);
1301
1302         if (!exmap_valid(vadr, count)) {
1303                 printk(KERN_ERR
1304                        "omapdsp: DSP address %08lx / size %08x "
1305                        "is not valid!\n", p, count);
1306                 return -EFAULT;
1307         }
1308         if (count > DSPSPACE_SIZE - p)
1309                 count = DSPSPACE_SIZE - p;
1310         if (copy_to_user(buf, vadr, count))
1311                 return -EFAULT;
1312         *ppos += count;
1313
1314         return count;
1315 }
1316
1317 static ssize_t dsp_mem_read(struct file *file, char *buf, size_t count,
1318                             loff_t *ppos)
1319 {
1320         int ret;
1321         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1322
1323         if (dsp_mem_enable(vadr) < 0)
1324                 return -EBUSY;
1325         if (is_dspbyte_internal_mem(*ppos))
1326                 ret = intmem_read(file, buf, count, ppos);
1327         else
1328                 ret = exmem_read(file, buf, count, ppos);
1329         dsp_mem_disable(vadr);
1330
1331         return ret;
1332 }
1333
1334 static ssize_t intmem_write(struct file *file, const char *buf, size_t count,
1335                             loff_t *ppos)
1336 {
1337         unsigned long p = *ppos;
1338         void *vadr = dspbyte_to_virt(p);
1339         ssize_t size = dspmem_size;
1340         ssize_t written;
1341
1342         if (p >= size)
1343                 return 0;
1344         clk_use(api_ck_handle);
1345         written = count;
1346         if (count > size - p)
1347                 written = size - p;
1348         if (copy_from_user(vadr, buf, written)) {
1349                 written = -EFAULT;
1350                 goto out;
1351         }
1352         *ppos += written;
1353 out:
1354         clk_unuse(api_ck_handle);
1355         return written;
1356 }
1357
1358 static ssize_t exmem_write(struct file *file, const char *buf, size_t count,
1359                            loff_t *ppos)
1360 {
1361         unsigned long p = *ppos;
1362         void *vadr = dspbyte_to_virt(p);
1363
1364         if (!exmap_valid(vadr, count)) {
1365                 printk(KERN_ERR
1366                        "omapdsp: DSP address %08lx / size %08x "
1367                        "is not valid!\n", p, count);
1368                 return -EFAULT;
1369         }
1370         if (count > DSPSPACE_SIZE - p)
1371                 count = DSPSPACE_SIZE - p;
1372         if (copy_from_user(vadr, buf, count))
1373                 return -EFAULT;
1374         *ppos += count;
1375
1376         return count;
1377 }
1378
1379 static ssize_t dsp_mem_write(struct file *file, const char *buf, size_t count,
1380                              loff_t *ppos)
1381 {
1382         int ret;
1383         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1384
1385         if (dsp_mem_enable(vadr) < 0)
1386                 return -EBUSY;
1387         if (is_dspbyte_internal_mem(*ppos))
1388                 ret = intmem_write(file, buf, count, ppos);
1389         else
1390                 ret = exmem_write(file, buf, count, ppos);
1391         dsp_mem_disable(vadr);
1392
1393         return ret;
1394 }
1395
/*
 * dsp_mem_ioctl(): ioctl dispatcher for the dsp_mem device.
 * Commands cover MMU (re)initialization, external-memory map/unmap and
 * flush, frame buffer export, MMU fault acknowledge, and kernel memory
 * pool reservation/release.  Returns the per-command result or
 * -ENOIOCTLCMD for unknown commands.
 */
static int dsp_mem_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case OMAP_DSP_MEM_IOCTL_MMUINIT:
                dsp_mmu_init();
                return 0;

        case OMAP_DSP_MEM_IOCTL_EXMAP:
                {
                        struct omap_dsp_mapinfo mapinfo;
                        if (copy_from_user(&mapinfo, (void *)arg,
                                           sizeof(mapinfo)))
                                return -EFAULT;
                        return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
                                         EXMAP_TYPE_MEM);
                }

        case OMAP_DSP_MEM_IOCTL_EXUNMAP:
                return dsp_exunmap((unsigned long)arg);

        case OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH:
                exmap_flush();
                return 0;

        case OMAP_DSP_MEM_IOCTL_FBEXPORT:
                {
                        /* arg points at the requested DSP address; the
                         * address actually used is written back. */
                        unsigned long dspadr;
                        int ret;
                        if (copy_from_user(&dspadr, (void *)arg, sizeof(long)))
                                return -EFAULT;
                        ret = dsp_fbexport(&dspadr);
                        if (copy_to_user((void *)arg, &dspadr, sizeof(long)))
                                return -EFAULT;
                        return ret;
                }

        case OMAP_DSP_MEM_IOCTL_MMUITACK:
                return dsp_mmu_itack();

        case OMAP_DSP_MEM_IOCTL_KMEM_RESERVE:
                {
                        unsigned long size;
                        if (copy_from_user(&size, (void *)arg, sizeof(long)))
                                return -EFAULT;
                        return dsp_kmem_reserve(size);
                }

        case OMAP_DSP_MEM_IOCTL_KMEM_RELEASE:
                dsp_kmem_release();
                return 0;

        default:
                return -ENOIOCTLCMD;
        }
}
1452
/*
 * dsp_mem_mmap(): mmap is not implemented for this device yet.
 */
static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        /*
         * FIXME
         */
        return -ENOSYS;
}
1460
1461 static int dsp_mem_open(struct inode *inode, struct file *file)
1462 {
1463         if (!capable(CAP_SYS_RAWIO))
1464                 return -EPERM;
1465
1466         return 0;
1467 }
1468
/* device close: no per-open state to clean up */
static int dsp_mem_release(struct inode *inode, struct file *file)
{
        return 0;
}
1473
1474 /*
1475  * sysfs files
1476  */
/*
 * mmu_show(): sysfs dump of all 32 DSP MMU TLB entries.
 * Reads each entry with __read_tlb() (which moves the victim pointer,
 * restored at the end via set_tlb_lock()) and prints the CAM/RAM
 * fields decoded into virtual address, physical address, page size and
 * access permission.
 */
static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        int len;
        int lbase, victim;
        int i;

        clk_use(dsp_ck_handle);
        down_read(&exmap_sem);

        get_tlb_lock(&lbase, &victim);

        len = sprintf(buf, "p: preserved,  v: valid\n"
                           "ety       cam_va     ram_pa   sz ap\n");
                        /* 00: p v 0x300000 0x10171800 64KB FA */
        for (i = 0; i < 32; i++) {
                unsigned short cam_h, cam_l, ram_h, ram_l;
                unsigned short cam_l_va_mask, prsvd, cam_vld, slst;
                unsigned long cam_va;
                unsigned short ram_l_ap;
                unsigned long ram_pa;
                char *pgsz_str, *ap_str;

                /* read a TLB entry */
                __read_tlb(lbase, i, &cam_h, &cam_l, &ram_h, &ram_l);

                /* decode the page size field of the CAM low word */
                slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
                cam_l_va_mask = get_cam_l_va_mask(slst);
                pgsz_str = (slst == DSPMMU_CAM_L_SLST_1MB) ? " 1MB":
                           (slst == DSPMMU_CAM_L_SLST_64KB)? "64KB":
                           (slst == DSPMMU_CAM_L_SLST_4KB) ? " 4KB":
                                                             " 1KB";
                prsvd    = cam_l & DSPMMU_CAM_L_P;
                cam_vld  = cam_l & DSPMMU_CAM_L_V;
                ram_l_ap = ram_l & DSPMMU_RAM_L_AP_MASK;
                ap_str = (ram_l_ap == DSPMMU_RAM_L_AP_RO) ? "RO":
                         (ram_l_ap == DSPMMU_RAM_L_AP_FA) ? "FA":
                                                            "NA";
                /* reassemble the split virtual/physical addresses */
                cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (unsigned long)(cam_l & cam_l_va_mask) << 6;
                ram_pa = (unsigned long)ram_h << 16 |
                         (ram_l & DSPMMU_RAM_L_RAM_LSB_MASK);

                if (i == lbase)
                        len += sprintf(buf + len, "lock base = %d\n", lbase);
                if (i == victim)
                        len += sprintf(buf + len, "victim    = %d\n", victim);
                /* 00: p v 0x300000 0x10171800 64KB FA */
                len += sprintf(buf + len,
                               "%02d: %c %c 0x%06lx 0x%08lx %s %s\n",
                               i,
                               prsvd   ? 'p' : ' ',
                               cam_vld ? 'v' : ' ',
                               cam_va, ram_pa, pgsz_str, ap_str);
        }

        /* restore victim entry */
        set_tlb_lock(lbase, victim);

        up_read(&exmap_sem);
        clk_unuse(dsp_ck_handle);
        return len;
}
1540
/* read-only sysfs file "mmu", backed by mmu_show() */
static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
1542
1543 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
1544                           char *buf)
1545 {
1546         int len;
1547         int i;
1548
1549         down_read(&exmap_sem);
1550         len = sprintf(buf, "v: valid,  c: cntnu\n"
1551                            "ety           vadr        buf od uc\n");
1552                          /* 00: v c 0xe0300000 0xc0171800  0 */
1553         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
1554                 struct exmap_tbl *ent = &exmap_tbl[i];
1555                 /* 00: v c 0xe0300000 0xc0171800  0 */
1556                 len += sprintf(buf + len, "%02d: %c %c 0x%8p 0x%8p %2d %2d\n",
1557                                i,
1558                                ent->valid ? 'v' : ' ',
1559                                ent->cntnu ? 'c' : ' ',
1560                                ent->vadr, ent->buf, ent->order, ent->usecount);
1561         }
1562
1563         up_read(&exmap_sem);
1564         return len;
1565 }
1566
/* read-only sysfs file "exmap", backed by exmap_show() */
static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
1568
1569 static ssize_t kmem_pool_show(struct device *dev,
1570                               struct device_attribute *attr, char *buf)
1571 {
1572         int count_1M, count_64K, total;
1573
1574         count_1M = kmem_pool_1M.count;
1575         count_64K = kmem_pool_64K.count;
1576         total = count_1M * SZ_1MB + count_64K * SZ_64KB;
1577
1578         return sprintf(buf, "0x%x %d %d\n", total, count_1M, count_64K);
1579 }
1580
/* read-only sysfs file "kmem_pool", backed by kmem_pool_show() */
static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);
1582
1583 /*
1584  * DSP MMU interrupt handler
1585  */
1586
1587 /*
1588  * MMU fault mask:
1589  * We ignore prefetch err.
1590  */
1591 #define MMUFAULT_MASK \
1592         (DSPMMU_FAULT_ST_PERM |\
1593          DSPMMU_FAULT_ST_TLB_MISS |\
1594          DSPMMU_FAULT_ST_TRANS)
/*
 * dsp_mmu_interrupt(): DSP MMU fault interrupt handler.
 * Reads the fault status and address registers; faults outside
 * MMUFAULT_MASK (e.g. prefetch errors) are logged and ignored.  For
 * real faults: if the DSP is up, the error is recorded for later
 * handling via the MMUITACK ioctl; otherwise the fault is acked and
 * the DSP is reset.
 */
irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        unsigned short status;
        unsigned short adh, adl;
        unsigned short dp;

        status = omap_readw(DSPMMU_FAULT_ST);
        adh = omap_readw(DSPMMU_FAULT_AD_H);
        adl = omap_readw(DSPMMU_FAULT_AD_L);
        dp = adh & DSPMMU_FAULT_AD_H_DP;
        dsp_fault_adr = MKLONG(adh & DSPMMU_FAULT_AD_H_ADR_MASK, adl);
        /* if the fault is masked, nothing to do */
        if ((status & MMUFAULT_MASK) == 0) {
                printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
                /*
                 * note: in OMAP1710,
                 * when CACHE + DMA domain gets out of idle in DSP,
                 * MMU interrupt occurs but DSPMMU_FAULT_ST is not set.
                 * in this case, we just ignore the interrupt.
                 */
                if (status) {
                        printk(KERN_DEBUG "%s%s%s%s\n",
                               (status & DSPMMU_FAULT_ST_PREF)?
                                        "  (prefetch err)" : "",
                               (status & DSPMMU_FAULT_ST_PERM)?
                                        "  (permission fault)" : "",
                               (status & DSPMMU_FAULT_ST_TLB_MISS)?
                                        "  (TLB miss)" : "",
                               (status & DSPMMU_FAULT_ST_TRANS) ?
                                        "  (translation fault)": "");
                        printk(KERN_DEBUG
                               "fault address = %s: 0x%06lx\n",
                               dp ? "DATA" : "PROGRAM",
                               dsp_fault_adr);
                }
                return IRQ_HANDLED;
        }

        printk(KERN_INFO "DSP MMU interrupt!\n");
        /* parenthesized fault names mean the bit is outside MMUFAULT_MASK */
        printk(KERN_INFO "%s%s%s%s\n",
               (status & DSPMMU_FAULT_ST_PREF)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_PREF)?
                                "  prefetch err":
                                "  (prefetch err)":
                                "",
               (status & DSPMMU_FAULT_ST_PERM)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_PERM)?
                                "  permission fault":
                                "  (permission fault)":
                                "",
               (status & DSPMMU_FAULT_ST_TLB_MISS)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_TLB_MISS)?
                                "  TLB miss":
                                "  (TLB miss)":
                                "",
               (status & DSPMMU_FAULT_ST_TRANS)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_TRANS)?
                                "  translation fault":
                                "  (translation fault)":
                                "");
        printk(KERN_INFO "fault address = %s: 0x%06lx\n",
               dp ? "DATA" : "PROGRAM",
               dsp_fault_adr);

        if (dsp_is_ready()) {
                /*
                 * If we call dsp_exmap() here,
                 * "kernel BUG at slab.c" occurs.
                 */
                /* FIXME */
                dsp_err_mmu_set(dsp_fault_adr);
        } else {
                disable_irq(INT_DSP_MMU);
                __dsp_mmu_itack();
                printk(KERN_INFO "Resetting DSP...\n");
                dsp_cpustat_request(CPUSTAT_RESET);
                enable_irq(INT_DSP_MMU);
                /*
                 * if we enable followings, semaphore lock should be avoided.
                 *
                printk(KERN_INFO "Flushing DSP MMU...\n");
                exmap_flush();
                dsp_mmu_init();
                 */
        }

        return IRQ_HANDLED;
}
1683
1684 /*
1685  *
1686  */
1687 struct file_operations dsp_mem_fops = {
1688         .owner   = THIS_MODULE,
1689         .llseek  = dsp_mem_lseek,
1690         .read    = dsp_mem_read,
1691         .write   = dsp_mem_write,
1692         .ioctl   = dsp_mem_ioctl,
1693         .mmap    = dsp_mem_mmap,
1694         .open    = dsp_mem_open,
1695         .release = dsp_mem_release,
1696 };
1697
/*
 * dsp_mem_start(): register the internal-memory enable/disable
 * callbacks with the DSP core.
 */
void dsp_mem_start(void)
{
        dsp_register_mem_cb(intmem_enable, intmem_disable);
}
1702
/*
 * dsp_mem_stop(): clear the memory-sync state and unregister the
 * callbacks installed by dsp_mem_start().
 */
void dsp_mem_stop(void)
{
        memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
        dsp_unregister_mem_cb();
}
1708
/*
 * dsp_mem_init(): module initialization.
 * Invalidates the exmap table, allocates the DSP vector page, brings
 * up the DSP MMU, sets the idle boot base and creates the sysfs files.
 * Returns 0 on success or -ENOMEM.
 */
int __init dsp_mem_init(void)
{
        int i;

        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                exmap_tbl[i].valid = 0;
        }

        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                printk(KERN_ERR
                       "omapdsp: failed to allocate memory "
                       "for dsp vector table\n");
                return -ENOMEM;
        }
        dsp_mmu_init();
        dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);

        /* NOTE(review): device_create_file() results are ignored, so a
         * sysfs file may silently be missing; confirm acceptable. */
        device_create_file(&dsp_device.dev, &dev_attr_mmu);
        device_create_file(&dsp_device.dev, &dev_attr_exmap);
        device_create_file(&dsp_device.dev, &dev_attr_kmem_pool);

        return 0;
}
1733
/*
 * dsp_mem_exit(): module teardown.
 * Shuts the MMU down, releases the kernel memory pools, frees the DSP
 * vector page and removes the sysfs files.
 */
void dsp_mem_exit(void)
{
        dsp_mmu_shutdown();
        dsp_kmem_release();

        if (dspvect_page != NULL) {
                unsigned long virt;

                /* NOTE(review): only a read lock is held while the page
                 * is freed -- a write lock looks intended; confirm. */
                down_read(&exmap_sem);

                virt = (unsigned long)dspbyte_to_virt(DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&exmap_sem);
        }

        device_remove_file(&dsp_device.dev, &dev_attr_mmu);
        device_remove_file(&dsp_device.dev, &dev_attr_exmap);
        device_remove_file(&dsp_device.dev, &dev_attr_kmem_pool);
}