/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>
#include <asm/arch/dsp_common.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU implements base and victim in different bits of the LOCK register
 * (the shifts are still the same). All other registers are the same
 * across all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT             10
#define MMU_LOCK_VICTIM_SHIFT           4

#define CAMERA_MMU_LOCK_BASE_MASK       (0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK     (0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr, align)  (!((adr) & ((align) - 1)))
#define ORDER_1MB       (20 - PAGE_SHIFT)
#define ORDER_64KB      (16 - PAGE_SHIFT)
#define ORDER_4KB       (12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE   (1 << 3)
#define MMU_CNTL_TWLENABLE      (1 << 2)
#define MMU_CNTL_MMUENABLE      (1 << 1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)                 \
        for (entry = mmu->exmap_tbl; prefetch(entry + 1),       \
             entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);    \
             entry++)

#define to_dev(obj)     container_of(obj, struct device, kobj)

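/*
 * Take an element from the pool's pre-allocated reserve first; plain
 * mempool_alloc() tries the underlying allocator before dipping into
 * the reserve. Fall back to mempool_alloc() once the reserve is empty.
 * Note that this reaches into mempool internals (pool->lock,
 * pool->elements, pool->curr_nr).
 */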
static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
{
        spin_lock_irq(&pool->lock);
        if (likely(pool->curr_nr)) {
                void *element = pool->elements[--pool->curr_nr];
                spin_unlock_irq(&pool->lock);
                return element;
        }

        spin_unlock_irq(&pool->lock);
        return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request physically contiguous 1MB or 64kB chunks,
 * which become difficult to satisfy once memory pages are fragmented.
 * So the user can reserve such memory blocks in an early phase
 * through kmem_reserve().
 */
static void *omap_mmu_pool_alloc(gfp_t gfp, void *order)
{
        return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
        free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
        unsigned long len = size;

        /* alignment check */
        if (!is_aligned(size, SZ_64K)) {
                dev_err(&mmu->dev,
                        "MMU %s: size(0x%lx) is not a multiple of 64KB.\n",
                        mmu->name, size);
                return -EINVAL;
        }

        if (size > (1 << mmu->addrspace)) {
                dev_err(&mmu->dev,
                        "MMU %s: size(0x%lx) is larger than the external "
                        "device memory space size (0x%x).\n", mmu->name,
                        size, (1 << mmu->addrspace));
                return -EINVAL;
        }

        if (size >= SZ_1M) {
                int nr = size >> 20;

                if (likely(!mempool_1M))
                        mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
                                                    omap_mmu_pool_free,
                                                    (void *)ORDER_1MB);
                else
                        mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
                                       GFP_KERNEL);

                size &= (SZ_1M - 1);
        }

        if (size >= SZ_64K) {
                int nr = size >> 16;

                if (likely(!mempool_64K))
                        mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
                                                     omap_mmu_pool_free,
                                                     (void *)ORDER_64KB);
                else
                        mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
                                       GFP_KERNEL);

                size &= (SZ_64K - 1);
        }

        if (size)
                len -= size;

        return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
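
/*
 * Example (an illustrative sketch, not part of this file): a DSP driver
 * would typically reserve its exmap memory once at init time, before
 * system memory gets fragmented, e.g.:
 *
 *	reserved = omap_mmu_kmem_reserve(mmu, SZ_1M * 2 + SZ_64K * 4);
 *	if (reserved < 0)
 *		dev_warn(&mmu->dev, "exmap kmem reservation failed\n");
 *
 * The return value is the number of bytes actually set aside (any
 * sub-64KB remainder of 'size' is ignored).
 */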

void omap_mmu_kmem_release(void)
{
        if (mempool_64K) {
                mempool_destroy(mempool_64K);
                mempool_64K = NULL;
        }

        if (mempool_1M) {
                mempool_destroy(mempool_1M);
                mempool_1M = NULL;
        }
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
        struct page *page, *ps, *pe;

        ps = virt_to_page(buf);
        pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

        for (page = ps; page < pe; page++)
                ClearPageReserved(page);

        if ((order == ORDER_64KB) && likely(mempool_64K))
                mempool_free((void *)buf, mempool_64K);
        else if ((order == ORDER_1MB) && likely(mempool_1M))
                mempool_free((void *)buf, mempool_1M);
        else
                free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(struct omap_mmu *mmu, unsigned long virt,
                     unsigned long phys, unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        dev_dbg(&mmu->dev,
                "MMU %s: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
                mmu->name, virt, phys, size);

        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;
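        /*
         * Note: prot_pte carries no cacheable/bufferable bits, so the
         * CPU-side mapping of the exmap area ends up uncached; see the
         * comment at the exmap_set_armmmu() call in omap_mmu_exmap().
         */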

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
        }
        BUG_ON(sz_left);

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(struct omap_mmu *mmu, unsigned long virt,
                        unsigned long size)
{
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;

        dev_dbg(&mmu->dev,
                "MMU %s: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
                mmu->name, virt, size);

        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                pmdp = pmd_offset(pgd_offset_k(virt), virt);
                ptep = pte_offset_kernel(pmdp, virt);
                pte_clear(&init_mm, virt, ptep);
        }
        BUG_ON(sz_left);
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
        /* exmap_sem should be held before calling this function */
        struct exmap_tbl *ent;

start:
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        if (vadr + len <= mapadr + mapsize) {
                                /* this map covers the whole range */
                                return 1;
                        } else {
                                /*
                                 * this map covers the range partially;
                                 * check the remaining portion.
                                 */
                                len -= mapadr + mapsize - vadr;
                                vadr = mapadr + mapsize;
                                goto start;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * While the usecount > 0, the area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount++;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount--;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns the physical address and sets *len to the contiguously
 * valid length from vadr.
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
        struct exmap_tbl *ent;

        if (omap_mmu_internal_memory(mmu, vadr)) {
                unsigned long addr = (unsigned long)vadr;
                *len = mmu->membase + mmu->memsize - addr;
                return addr;
        }

        /* EXRAM */
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        *len = mapadr + mapsize - vadr;
                        return __pa(ent->buf) + vadr - mapadr;
                }
        }

        /* valid mapping not found */
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
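
/*
 * Example (an illustrative sketch): callers typically walk a region
 * chunk by chunk, using the contiguous length reported through 'len':
 *
 *	while (total) {
 *		size_t len;
 *		unsigned long pa = omap_mmu_virt_to_phys(mmu, vadr, &len);
 *
 *		if (!pa)
 *			break;		(no valid mapping)
 *		if (len > total)
 *			len = total;
 *		... use [pa, pa + len) ...
 *		vadr += len;
 *		total -= len;
 *	}
 */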

/*
 * PTE operations
 */
static inline void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
                       unsigned long phys, int prot)
{
        pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

        if (virt & (1 << SECTION_SHIFT))
                pmdp++;
        *pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
        flush_pmd_entry(pmdp);
}

static inline void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
                            unsigned long phys, int prot)
{
        int i;
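
        /*
         * An ARM supersection maps 16MB and must be replicated in 16
         * consecutive first-level entries; each entry covers 1MB,
         * hence the 16 iterations below.
         */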
        for (i = 0; i < 16; i++) {
                omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
                virt += (PGDIR_SIZE / 2);
        }
}

static inline int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
                    unsigned long phys, pgprot_t prot)
{
        pte_t *ptep;
        pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

        if (!(prot & PTE_TYPE_MASK))
                prot |= PTE_TYPE_SMALL;

        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(mm, virt);
                if (ptep == NULL)
                        return -ENOMEM;
                pmd_populate_kernel(mm, pmdp, ptep);
        }
        ptep = pte_offset_kernel(pmdp, virt);
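        /*
         * pte_offset_kernel() points at the Linux copy of the PTE;
         * step back PTRS_PER_PTE entries to reach the hardware PTE
         * table, which is what the external MMU's table walker reads.
         */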
        ptep -= PTRS_PER_PTE;
        *ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
        flush_pmd_entry((pmd_t *)ptep);
        return 0;
}

static inline int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
                         unsigned long phys, pgprot_t prot)
{
        int i, ret;

        for (i = 0; i < 16; i++) {
                ret = omap_mmu_alloc_page(mm, virt, phys,
                                          prot | PTE_TYPE_LARGE);
                if (ret)
                        return -ENOMEM; /* can only fail the 1st time */
                virt += PAGE_SIZE;
        }
        return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
                             struct omap_mmu_tlb_entry *e)
{
        int ret = 0;
        struct mm_struct *mm = mmu->twl_mm;
        const unsigned long va = e->va;
        const unsigned long pa = e->pa;
        const pgprot_t prot = mmu->ops->pte_get_attr(e);

        spin_lock(&mm->page_table_lock);

        switch (e->pgsz) {
        case OMAP_MMU_CAM_PAGESIZE_16MB:
                omap_mmu_alloc_supersection(mm, va, pa, prot);
                break;
        case OMAP_MMU_CAM_PAGESIZE_1MB:
                omap_mmu_alloc_section(mm, va, pa, prot);
                break;
        case OMAP_MMU_CAM_PAGESIZE_64KB:
                ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
                break;
        case OMAP_MMU_CAM_PAGESIZE_4KB:
                ret = omap_mmu_alloc_page(mm, va, pa, prot);
                break;
        default:
                BUG();
                break;
        }

        spin_unlock(&mm->page_table_lock);

        return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
        pte_t *ptep, *end;
        pmd_t *pmdp;
        struct mm_struct *mm = mmu->twl_mm;

        spin_lock(&mm->page_table_lock);

        pmdp = pmd_offset(pgd_offset(mm, virt), virt);

        if (pmd_none(*pmdp))
                goto out;

        if (!pmd_table(*pmdp))
                goto invalidate_pmd;

        ptep = pte_offset_kernel(pmdp, virt);
        pte_clear(mm, virt, ptep);
        flush_pmd_entry((pmd_t *)ptep);

        /* zap the PTE table if every entry in it is now clear */
        end = pmd_page_vaddr(*pmdp);
        ptep = end - PTRS_PER_PTE;
        while (ptep < end) {
                if (!pte_none(*ptep))
                        goto out;
                ptep++;
        }
        pte_free_kernel(pmd_page_vaddr(*pmdp));

 invalidate_pmd:
        pmd_clear(pmdp);
        flush_pmd_entry(pmdp);
 out:
        spin_unlock(&mm->page_table_lock);
}

/*
 * TLB operations
 */
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
        return mmu->ops->cam_ram_alloc(mmu, entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
                                  struct cam_ram_regset *cr)
{
        return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
        unsigned long lock = omap_mmu_read_reg(mmu, OMAP_MMU_LOCK);
        int mask;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
        tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
        tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
        omap_mmu_write_reg(mmu,
                           (lock->base << MMU_LOCK_BASE_SHIFT) |
                           (lock->victim << MMU_LOCK_VICTIM_SHIFT),
                           OMAP_MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
                       struct cam_ram_regset *cr)
{
        /* set victim */
        omap_mmu_set_tlb_lock(mmu, lock);

        if (likely(mmu->ops->read_tlb))
                mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        if (likely(mmu->ops->load_tlb))
                mmu->ops->load_tlb(mmu, cr);

        /* flush the entry */
        omap_mmu_flush(mmu);

        /* load a TLB entry */
        omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
                            struct omap_mmu_tlb_entry *entry)
{
        struct omap_mmu_tlb_lock lock;
        struct cam_ram_regset *cr;
        int ret;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
                struct cam_ram_regset tmp;

                /* read a TLB entry */
                omap_mmu_read_tlb(mmu, &lock, &tmp);
                if (!omap_mmu_cam_ram_valid(mmu, &tmp))
                        goto found_victim;
        }
        omap_mmu_set_tlb_lock(mmu, &lock);

found_victim:
        /* The last entry cannot be locked? */
        if (lock.victim == (mmu->nr_tlb_entries - 1)) {
                dev_err(&mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
                ret = -EBUSY;
                goto out;
        }

        cr = omap_mmu_cam_ram_alloc(mmu, entry);
        if (IS_ERR(cr)) {
                ret = PTR_ERR(cr);
                goto out;
        }

        omap_mmu_load_tlb(mmu, cr);
        kfree(cr);

        /* update lock base */
        if (lock.victim == lock.base)
                lock.base++;

        omap_mmu_set_tlb_lock(mmu, &lock);
        ret = 0;
out:
        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
        struct omap_mmu_tlb_lock lock;
        int i;
        int max_valid = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (i = 0; i < lock.base; i++) {
                struct cam_ram_regset cr;

                /* read a TLB entry */
                lock.victim = i;
                omap_mmu_read_tlb(mmu, &lock, &cr);
                if (!omap_mmu_cam_ram_valid(mmu, &cr))
                        continue;

                if (omap_mmu_cam_va(mmu, &cr) == vadr)
                        /* flush the entry */
                        omap_mmu_flush(mmu);
                else
                        max_valid = i;
        }

        /* set the new lock base */
        lock.base = lock.victim = max_valid + 1;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock lock;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_GFLUSH);
        lock.base = lock.victim = mmu->nr_exmap_preserved;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
                            struct omap_mmu_tlb_entry *entry)
{
        int ret;

        /* XXX use PG_flag for prsvd */
        ret = omap_mmu_load_pte(mmu, entry);
        if (ret)
                return ret;
        if (entry->tlb)
                ret = omap_mmu_load_tlb_entry(mmu, entry);
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
        int ret = omap_mmu_clear_tlb_entry(mmu, vadr);

        if (ret)
                return ret;
        omap_mmu_clear_pte(mmu, vadr);
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * The MEM_IOCTL_EXMAP ioctl calls this function with padr == 0.
 * In that case, the buffer for the external device is allocated in
 * this routine and then mapped.
 * Callers such as frame buffer sharing, on the other hand, pass a
 * non-zero padr: a known physical address range starting at padr is
 * going to be shared with the external device.
 */
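
/*
 * Example (an illustrative sketch; addresses are made up): mapping 1MB
 * of freshly allocated kernel memory at device address 0x200000, and
 * sharing an already-known physical frame buffer at 0x300000:
 *
 *	omap_mmu_exmap(mmu, 0x200000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *	omap_mmu_exmap(mmu, 0x300000, fb_paddr, fb_size, EXMAP_TYPE_FB);
 *
 * Both are undone with omap_mmu_exunmap(mmu, devadr).
 */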
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long devadr,
                   unsigned long padr, unsigned long size,
                   enum exmap_type type)
{
        unsigned long pgsz;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;
        int prev = -1;
        unsigned long _devadr = devadr;
        unsigned long _padr = padr;
        void *_vadr = omap_mmu_to_virt(mmu, devadr);
        unsigned long _size = size;
        struct omap_mmu_tlb_entry tlb_ent;
        struct exmap_tbl *exmap_ent, *tmp_ent;
        int status;
        int idx;

#define MINIMUM_PAGESZ  SZ_4K
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                dev_err(&mmu->dev,
                        "MMU %s: size(0x%lx) is not a multiple of 4KB.\n",
                        mmu->name, size);
                return -EINVAL;
        }
        if (!is_aligned(devadr, MINIMUM_PAGESZ)) {
                dev_err(&mmu->dev,
                        "MMU %s: external device address(0x%lx) is not"
                        " aligned.\n", mmu->name, devadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                dev_err(&mmu->dev,
                        "MMU %s: physical address(0x%lx) is not aligned.\n",
                        mmu->name, padr);
                return -EINVAL;
        }

        /* address validity check */
        if ((devadr < mmu->memsize) ||
            (devadr >= (1 << mmu->addrspace))) {
                dev_err(&mmu->dev,
                        "MMU %s: illegal address/size for %s().\n",
                        mmu->name, __func__);
                return -EINVAL;
        }

        down_write(&mmu->exmap_sem);

        /* overlap check */
        omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
                unsigned long mapsize;

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        dev_err(&mmu->dev, "MMU %s: exmap page overlap!\n",
                                mmu->name);
                        up_write(&mmu->exmap_sem);
                        return -EINVAL;
                }
        }

start:
        buf = NULL;
        /* Are there any free TLB lines? */
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
                if (!mmu->exmap_tbl[idx].valid)
                        goto found_free;

        dev_err(&mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = mmu->exmap_tbl + idx;

        if ((_size >= SZ_1M) &&
            (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
            is_aligned(_devadr, SZ_1M)) {
                unit = SZ_1M;
                pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
        } else if ((_size >= SZ_64K) &&
                   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
                   is_aligned(_devadr, SZ_64K)) {
                unit = SZ_64K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
        } else {
                unit = SZ_4K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
        }

        order = get_order(unit);

        /* buffer allocation */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                if ((order == ORDER_1MB) && likely(mempool_1M))
                        buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
                else if ((order == ORDER_64KB) && likely(mempool_64K))
                        buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
                else {
                        buf = (void *)__get_dma_pages(GFP_KERNEL, order);
                        if (buf == NULL) {
                                status = -ENOMEM;
                                goto fail;
                        }
                }

                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);

                for (page = ps; page < pe; page++)
                        SetPageReserved(page);

                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we must not access the allocated memory through 'buf',
         * since this area must not be cached.
         */
        status = exmap_set_armmmu(mmu, (unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading external device PTE entry */
        INIT_TLB_ENTRY(&tlb_ent, _devadr, _padr, pgsz);
        status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
        if (status < 0) {
                exmap_clear_armmmu(mmu, (unsigned long)_vadr, unit);
                goto fail;
        }

        INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
        exmap_ent->link.prev = prev;
        if (prev >= 0)
                mmu->exmap_tbl[prev].link.next = idx;

        if ((_size -= unit) == 0) {     /* normal completion */
                up_write(&mmu->exmap_sem);
                return size;
        }

        _devadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        prev = idx;
        goto start;

fail:
        up_write(&mmu->exmap_sem);
        if (buf)
                omap_mmu_free_pages((unsigned long)buf, order);
        omap_mmu_exunmap(mmu, devadr);
        return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);

static unsigned long unmap_free_arm(struct omap_mmu *mmu,
                                    struct exmap_tbl *ent)
{
        unsigned long size;

        /* clearing ARM MMU */
        size = 1 << (ent->order + PAGE_SHIFT);
        exmap_clear_armmmu(mmu, (unsigned long)ent->vadr, size);

        /* freeing allocated memory */
        if (ent->type == EXMAP_TYPE_MEM) {
                omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
                dev_dbg(&mmu->dev, "MMU %s: freeing 0x%lx bytes @ adr 0x%8p\n",
                        mmu->name, size, ent->buf);
        }

        ent->valid = 0;
        return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long devadr)
{
        void *vadr;
        unsigned long size;
        int total = 0;
        struct exmap_tbl *ent;
        int idx;

        vadr = omap_mmu_to_virt(mmu, devadr);
        down_write(&mmu->exmap_sem);
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
                ent = mmu->exmap_tbl + idx;
                if (!ent->valid || ent->prsvd)
                        continue;
                if (ent->vadr == vadr)
                        goto found_map;
        }
        up_write(&mmu->exmap_sem);
        dev_warn(&mmu->dev, "MMU %s: address %06lx not found in exmap_tbl.\n",
                 mmu->name, devadr);
        return -EINVAL;

found_map:
        if (ent->usecount > 0) {
                dev_err(&mmu->dev, "MMU %s: exmap reference count is not 0.\n"
                        "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
                        mmu->name, idx, ent->vadr, ent->order, ent->usecount);
                up_write(&mmu->exmap_sem);
                return -EINVAL;
        }
        /* clearing external device PTE entry */
        omap_mmu_clear_pte_entry(mmu, devadr);

        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(mmu, ent);
        total += size;

        /* we don't free PTEs */

        /* flush TLB */
        flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

        /* check if next mapping is in same group */
        idx = ent->link.next;
        if (idx < 0)
                goto up_out;    /* normal completion */
        ent = mmu->exmap_tbl + idx;
        devadr += size;
        vadr   += size;
        if (ent->vadr == vadr)
                goto found_map; /* continue */

        dev_err(&mmu->dev, "MMU %s: illegal exmap_tbl grouping!\n"
                "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
                mmu->name, vadr, idx, ent->vadr);
        up_write(&mmu->exmap_sem);
        return -EINVAL;

up_out:
        up_write(&mmu->exmap_sem);
        return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);

        /* clearing TLB entry */
        omap_mmu_gflush(mmu);

        omap_mmu_for_each_tlb_entry(mmu, ent)
                if (ent->valid && !ent->prsvd)
                        unmap_free_arm(mmu, ent);

        /* flush TLB */
        if (likely(mmu->membase))
                flush_tlb_kernel_range(mmu->membase + mmu->memsize,
                                       mmu->membase + (1 << mmu->addrspace));

        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
                                    unsigned long devadr, int index)
{
        unsigned long phys;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        phys = __pa(buf);
        virt = omap_mmu_to_virt(mmu, devadr);
        exmap_set_armmmu(mmu, (unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
        INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, devadr, phys);
        omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long devadr)
{
        void *virt = omap_mmu_to_virt(mmu, devadr);

        exmap_clear_armmmu(mmu, (unsigned long)virt, PAGE_SIZE);
        /* DSP MMU is shutting down; not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
#if defined(CONFIG_ARCH_OMAP2) /* FIXME */
        int i;

        omap_mmu_write_reg(mmu, 0x2, OMAP_MMU_SYSCONFIG);

        for (i = 0; i < 10000; i++)
                if (likely(omap_mmu_read_reg(mmu, OMAP_MMU_SYSSTATUS) & 0x1))
                        break;
#endif
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x00, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
        u32 val = OMAP_MMU_CNTL_MMU_EN | MMU_CNTL_TWLENABLE;

        if (likely(reset))
                omap_mmu_reset(mmu);
#if defined(CONFIG_ARCH_OMAP2) /* FIXME */
        omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd),
                           OMAP_MMU_TTB);
#else
        omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) & 0xffff,
                           OMAP_MMU_TTB_L);
        omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) >> 16,
                           OMAP_MMU_TTB_H);
        val |= OMAP_MMU_CNTL_RESET_SW;
#endif
        omap_mmu_write_reg(mmu, val, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
        struct omap_mmu *mmu = dev_id;

        if (likely(mmu->ops->interrupt))
                mmu->ops->interrupt(mmu);

        return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();
        down_write(&mmu->exmap_sem);

        ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
                          mmu->name, mmu);
        if (ret < 0) {
                dev_err(&mmu->dev, "MMU %s: failed to register MMU interrupt:"
                        " %d\n", mmu->name, ret);
                goto fail;
        }

        omap_mmu_disable(mmu);  /* clear all */
        udelay(100);
        omap_mmu_enable(mmu, 1);

        memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        if (unlikely(mmu->ops->startup))
                ret = mmu->ops->startup(mmu);
 fail:
        up_write(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
        free_irq(mmu->irq, mmu);

        if (unlikely(mmu->ops->shutdown))
                mmu->ops->shutdown(mmu);

        omap_mmu_exmap_flush(mmu);
        omap_mmu_disable(mmu); /* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_enable))
                return mmu->ops->mem_enable(mmu, addr);

        down_read(&mmu->exmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_disable)) {
                mmu->ops->mem_disable(mmu, addr);
                return;
        }

        up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t read;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        read = count;
        if (count > size - p)
                read = size - p;
        if (copy_to_user(buf, vadr, read)) {
                read = -EFAULT;
                goto out;
        }
        *ppos += read;
out:
        clk_disable(mmu->memclk);
        return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                          loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                dev_err(&mmu->dev, "MMU %s: external device address %08lx / "
                        "size %08x is not valid!\n", mmu->name, p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_to_user(buf, vadr, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj,
                                 struct bin_attribute *attr,
                                 char *buf, loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_read(mmu, buf, count, &offset);
        else
                ret = exmem_read(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf,
                            size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t written;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        written = count;
        if (count > size - p)
                written = size - p;
        if (copy_from_user(vadr, buf, written)) {
                written = -EFAULT;
                goto out;
        }
        *ppos += written;
out:
        clk_disable(mmu->memclk);
        return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, const char *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                dev_err(&mmu->dev, "MMU %s: external device address %08lx "
                        "/ size %08x is not valid!\n", mmu->name, p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_from_user(vadr, buf, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj,
                                  struct bin_attribute *attr,
                                  char *buf, loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_write(mmu, buf, count, &offset);
        else
                ret = exmem_write(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static struct bin_attribute dev_attr_mem = {
        .attr   = {
                .name   = "mem",
                .owner  = THIS_MODULE,
                .mode   = S_IRUSR | S_IWUSR | S_IRGRP,
        },

        .read   = omap_mmu_mem_read,
        .write  = omap_mmu_mem_write,
};

/* To be made obsolete; kept for backward compatibility */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu,
                            struct bin_attribute *attr,
                            char *buf, loff_t offset, size_t count)
{
        return omap_mmu_mem_read(&mmu->dev.kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu,
                             struct bin_attribute *attr,
                             char *buf, loff_t offset, size_t count)
{
        return omap_mmu_mem_write(&mmu->dev.kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = -EIO;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        down_read(&mmu->exmap_sem);

        omap_mmu_get_tlb_lock(mmu, &tlb_lock);

        if (likely(mmu->ops->show))
                ret = mmu->ops->show(mmu, buf, &tlb_lock);

        /* restore the victim entry */
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        up_read(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct exmap_tbl *ent;
        int len;
        int i;

        down_read(&mmu->exmap_sem);
        len = sprintf(buf, "  devadr     size         buf     size uc\n");
                         /* 0x300000 0x123000  0xc0171000 0x100000  0*/

        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *vadr;
                unsigned long size;
                enum exmap_type type;
                int idx;

                /* find the head of a link chain */
                if (!ent->valid || (ent->link.prev >= 0))
                        continue;

                vadr = ent->vadr;
                type = ent->type;
                size = 0;
                i = ent - mmu->exmap_tbl;
                idx = i;
                do {
                        ent = mmu->exmap_tbl + idx;
                        size += PAGE_SIZE << ent->order;
                } while ((idx = ent->link.next) >= 0);

                len += sprintf(buf + len, "0x%06lx %#8lx",
                               virt_to_omap_mmu(mmu, vadr), size);

                if (type == EXMAP_TYPE_FB) {
                        len += sprintf(buf + len, "    framebuf\n");
                } else {
                        len += sprintf(buf + len, "\n");
                        idx = i;
                        do {
                                ent = mmu->exmap_tbl + idx;
                                len += sprintf(buf + len,
                                               /* 0xc0171000 0x100000  0*/
                                               "%19s0x%8p %#8lx %2d\n",
                                               "", ent->buf,
                                               PAGE_SIZE << ent->order,
                                               ent->usecount);
                        } while ((idx = ent->link.next) >= 0);
                }
        }

        up_read(&mmu->exmap_sem);
        return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
                           const char *buf,
                           size_t count)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long base = 0, len = 0;
        int ret;

        sscanf(buf, "%lx %lx", &base, &len);

        if (!base)
                return -EINVAL;

        if (len) {
                /* Add the mapping */
                ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
                if (ret < 0)
                        return ret;
        } else {
                /* Remove the mapping */
                ret = omap_mmu_exunmap(mmu, base);
                if (ret < 0)
                        return ret;
        }

        return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
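
/*
 * Illustrative shell usage (assuming the device appears under
 * /sys/class/mmu/dsp): writing "base len" adds a mapping, writing
 * "base" alone removes it, and the attributes can be read back:
 *
 *	# echo "200000 100000" > /sys/class/mmu/dsp/exmap
 *	# cat /sys/class/mmu/dsp/exmap
 *	# echo "200000" > /sys/class/mmu/dsp/exmap
 *	# cat /sys/class/mmu/dsp/mmu
 */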

static ssize_t mempool_show(struct class *class, char *buf)
{
        int min_nr_1M = 0, curr_nr_1M = 0;
        int min_nr_64K = 0, curr_nr_64K = 0;
        int total = 0;

        if (likely(mempool_1M)) {
                min_nr_1M  = mempool_1M->min_nr;
                curr_nr_1M = mempool_1M->curr_nr;
                total += min_nr_1M * SZ_1M;
        }
        if (likely(mempool_64K)) {
                min_nr_64K  = mempool_64K->min_nr;
                curr_nr_64K = mempool_64K->curr_nr;
                total += min_nr_64K * SZ_64K;
        }

        return sprintf(buf,
                       "0x%x\n"
                       "1M  buffer: %d (%d free)\n"
                       "64K buffer: %d (%d free)\n",
                       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
        .name           = "mmu",
        .dev_release    = omap_mmu_class_dev_release,
};

int omap_mmu_register(struct omap_mmu *mmu)
{
        int ret;

        mmu->dev.class = &omap_mmu_class;
        strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
        dev_set_drvdata(&mmu->dev, mmu);

        mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
                                 GFP_KERNEL);
        if (!mmu->exmap_tbl)
                return -ENOMEM;

        mmu->twl_mm = mm_alloc();
        if (!mmu->twl_mm) {
                ret = -ENOMEM;
                goto err_mm_alloc;
        }

        ret = device_register(&mmu->dev);
        if (unlikely(ret))
                goto err_dev_register;

        init_rwsem(&mmu->exmap_sem);

        ret = omap_mmu_init(mmu);
        if (unlikely(ret))
                goto err_mmu_init;

        ret = device_create_file(&mmu->dev, &dev_attr_mmu);
        if (unlikely(ret))
                goto err_dev_create_mmu;
        ret = device_create_file(&mmu->dev, &dev_attr_exmap);
        if (unlikely(ret))
                goto err_dev_create_exmap;

        if (likely(mmu->membase)) {
                dev_attr_mem.size = mmu->memsize;
                ret = device_create_bin_file(&mmu->dev, &dev_attr_mem);
                if (unlikely(ret))
                        goto err_bin_create_mem;
        }

        return 0;

err_bin_create_mem:
        device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
        device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
        omap_mmu_shutdown(mmu);
err_mmu_init:
        device_unregister(&mmu->dev);
err_dev_register:
        __mmdrop(mmu->twl_mm);
        mmu->twl_mm = NULL;
err_mm_alloc:
        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);
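
/*
 * Example (a sketch): a platform MMU driver (cf. the mach-omap1/mmu.h
 * and mach-omap2/mmu.h includes above) fills in a struct omap_mmu --
 * name, type, nr_tlb_entries, clk, irq and ops -- and then hands it
 * over to this framework:
 *
 *	ret = omap_mmu_register(mmu);
 *	if (ret)
 *		return ret;
 *	...
 *	omap_mmu_unregister(mmu);
 */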

void omap_mmu_unregister(struct omap_mmu *mmu)
{
        omap_mmu_shutdown(mmu);
        omap_mmu_kmem_release();

        device_remove_file(&mmu->dev, &dev_attr_mmu);
        device_remove_file(&mmu->dev, &dev_attr_exmap);

        if (likely(mmu->membase))
                device_remove_bin_file(&mmu->dev, &dev_attr_mem);

        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;

        if (mmu->twl_mm) {
                __mmdrop(mmu->twl_mm);
                mmu->twl_mm = NULL;
        }

        device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
        int ret = class_register(&omap_mmu_class);

        if (!ret)
                ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

        return ret;
}

static void __exit omap_mmu_class_exit(void)
{
        class_remove_file(&omap_mmu_class, &class_attr_mempool);
        class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");