/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK applies only to the IVA and DSP MMUs;
 * the camera MMU implements base and victim in different bits of the
 * LOCK register (the shifts are the same).  All of the other registers
 * are identical across the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT             10
#define MMU_LOCK_VICTIM_SHIFT           4

#define CAMERA_MMU_LOCK_BASE_MASK       (0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK     (0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr, align)  (!((adr) & ((align) - 1)))
#define ORDER_1MB       (20 - PAGE_SHIFT)
#define ORDER_64KB      (16 - PAGE_SHIFT)
#define ORDER_4KB       (12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE   (1 << 3)
#define MMU_CNTL_TWLENABLE      (1 << 2)
#define MMU_CNTL_MMUENABLE      (1 << 1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)                 \
        for (entry = mmu->exmap_tbl; prefetch(entry + 1),       \
             entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);    \
             entry++)

#define to_dev(obj)     container_of(obj, struct device, kobj)

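/*
 * Take an element straight from the pool's preallocated stock when one
 * is available; fall back to mempool_alloc(), which may allocate from
 * the system, only when the stock is empty.
 */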
static void *mempool_alloc_from_pool(mempool_t *pool,
                                     unsigned int __nocast gfp_mask)
{
        spin_lock_irq(&pool->lock);
        if (likely(pool->curr_nr)) {
                void *element = pool->elements[--pool->curr_nr];
                spin_unlock_irq(&pool->lock);
                return element;
        }

        spin_unlock_irq(&pool->lock);
        return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may request contiguous 1MB or 64kB blocks, which are hard to
 * satisfy once memory pages have become fragmented.  Callers can
 * therefore reserve such blocks ahead of time with kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
        return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
        free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
        unsigned long len = size;

        /* alignment check */
        if (!is_aligned(size, SZ_64K)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
                return -EINVAL;
        }

        if (size > (1 << mmu->addrspace)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is larger than DSP memory space "
                       "size (0x%x).\n", size, (1 << mmu->addrspace));
                return -EINVAL;
        }

        if (size >= SZ_1M) {
                int nr = size >> 20;

                if (likely(!mempool_1M))
                        mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
                                                    omap_mmu_pool_free,
                                                    (void *)ORDER_1MB);
                else
                        mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
                                       GFP_KERNEL);

                size &= ~(0xf << 20);
        }

        if (size >= SZ_64K) {
                int nr = size >> 16;

                if (likely(!mempool_64K))
                        mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
                                                     omap_mmu_pool_free,
                                                     (void *)ORDER_64KB);
                else
                        mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
                                       GFP_KERNEL);

                size &= ~(0xf << 16);
        }

        if (size)
                len -= size;

        return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
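
/*
 * Example (a sketch; "mmu" stands for an already-registered instance):
 * reserve 2MB + 64kB of DMA-capable memory early, so that later
 * exmap() calls can still get contiguous blocks after memory has
 * fragmented:
 *
 *	if (omap_mmu_kmem_reserve(mmu, SZ_2M + SZ_64K) < 0)
 *		printk(KERN_WARNING "omapdsp: kmem reservation failed\n");
 */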

void omap_mmu_kmem_release(void)
{
        if (mempool_64K) {
                mempool_destroy(mempool_64K);
                mempool_64K = NULL;
        }

        if (mempool_1M) {
                mempool_destroy(mempool_1M);
                mempool_1M = NULL;
        }
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
        struct page *page, *ps, *pe;

        ps = virt_to_page(buf);
        pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

        for (page = ps; page < pe; page++)
                ClearPageReserved(page);

        if ((order == ORDER_64KB) && likely(mempool_64K))
                mempool_free((void *)buf, mempool_64K);
        else if ((order == ORDER_1MB) && likely(mempool_1M))
                mempool_free((void *)buf, mempool_1M);
        else
                free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
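/*
 * exmap_set_armmmu() maps a region into the kernel's own page tables
 * with 4kB PTEs so that the ARM side can reach memory exported to the
 * DSP; exmap_clear_armmmu() tears such a mapping down again.
 */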
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        printk(KERN_DEBUG
               "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
        }
        if (sz_left)
                BUG();

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;

        printk(KERN_DEBUG
               "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
               virt, size);

        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                pmdp = pmd_offset(pgd_offset_k(virt), virt);
                ptep = pte_offset_kernel(pmdp, virt);
                pte_clear(&init_mm, virt, ptep);
        }
        if (sz_left)
                BUG();
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

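/*
 * exmap_valid() checks whether [vadr, vadr + len) is fully covered by
 * the current exmap entries.  A region may span several entries, so
 * the search restarts with the uncovered remainder until it is
 * exhausted.
 */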
int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
        /* exmap_sem should be held before calling this function */
        struct exmap_tbl *ent;

start:
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        if (vadr + len <= mapadr + mapsize) {
                                /* this map covers the whole region */
                                return 1;
                        } else {
                                /*
                                 * this map covers the region only
                                 * partially; check the rest.
                                 */
                                len -= mapadr + mapsize - vadr;
                                vadr = mapadr + mapsize;
                                goto start;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.  While the usecount > 0, the area
 * can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount++;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount--;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns the physical address and sets *len to the valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
        struct exmap_tbl *ent;

        if (omap_mmu_internal_memory(mmu, vadr)) {
                unsigned long addr = (unsigned long)vadr;
                *len = mmu->membase + mmu->memsize - addr;
                return addr;
        }

        /* EXRAM */
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        *len = mapadr + mapsize - vadr;
                        return __pa(ent->buf) + vadr - mapadr;
                }
        }

        /* valid mapping not found */
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);

/*
 * PTE operations
 */
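/*
 * These helpers build ARM-format descriptors by hand: 16MB
 * supersections and 1MB sections go into the first-level table, while
 * 64kB large pages and 4kB small pages go through a second-level
 * table, matching the formats expected by the MMU's table walker.
 */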
static inline void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
                       unsigned long phys, int prot)
{
        pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
        if (virt & (1 << SECTION_SHIFT))
                pmdp++;
        *pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
        flush_pmd_entry(pmdp);
}

static inline void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
                            unsigned long phys, int prot)
{
        int i;
        for (i = 0; i < 16; i += 1) {
                omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
                virt += (PGDIR_SIZE / 2);
        }
}

static inline int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
                    unsigned long phys, pgprot_t prot)
{
        pte_t *ptep;
        pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

        if (!(prot & PTE_TYPE_MASK))
                prot |= PTE_TYPE_SMALL;

        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(mm, virt);
                if (ptep == NULL)
                        return -ENOMEM;
                pmd_populate_kernel(mm, pmdp, ptep);
        }
        ptep = pte_offset_kernel(pmdp, virt);
        ptep -= PTRS_PER_PTE;
        *ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
        flush_pmd_entry((pmd_t *)ptep);
        return 0;
}

static inline int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
                         unsigned long phys, pgprot_t prot)
{
        int i, ret;
        for (i = 0; i < 16; i += 1) {
                ret = omap_mmu_alloc_page(mm, virt, phys,
                                          prot | PTE_TYPE_LARGE);
                if (ret)
                        return -ENOMEM; /* only 1st time */
                virt += PAGE_SIZE;
        }
        return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
                             struct omap_mmu_tlb_entry *e)
{
        int ret = 0;
        struct mm_struct *mm = mmu->twl_mm;
        const unsigned long va = e->va;
        const unsigned long pa = e->pa;
        const pgprot_t prot = mmu->ops->pte_get_attr(e);

        spin_lock(&mm->page_table_lock);

        switch (e->pgsz) {
        case OMAP_MMU_CAM_PAGESIZE_16MB:
                omap_mmu_alloc_supersection(mm, va, pa, prot);
                break;
        case OMAP_MMU_CAM_PAGESIZE_1MB:
                omap_mmu_alloc_section(mm, va, pa, prot);
                break;
        case OMAP_MMU_CAM_PAGESIZE_64KB:
                ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
                break;
        case OMAP_MMU_CAM_PAGESIZE_4KB:
                ret = omap_mmu_alloc_page(mm, va, pa, prot);
                break;
        default:
                BUG();
                break;
        }

        spin_unlock(&mm->page_table_lock);

        return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
        pte_t *ptep, *end;
        pmd_t *pmdp;
        struct mm_struct *mm = mmu->twl_mm;

        spin_lock(&mm->page_table_lock);

        pmdp = pmd_offset(pgd_offset(mm, virt), virt);

        if (pmd_none(*pmdp))
                goto out;

        if (!pmd_table(*pmdp))
                goto invalidate_pmd;

        ptep = pte_offset_kernel(pmdp, virt);
        pte_clear(mm, virt, ptep);
        flush_pmd_entry((pmd_t *)ptep);

        /* zap pte */
        end = pmd_page_vaddr(*pmdp);
        ptep = end - PTRS_PER_PTE;
        while (ptep < end) {
                if (!pte_none(*ptep))
                        goto out;
                ptep++;
        }
        pte_free_kernel(pmd_page_vaddr(*pmdp));

 invalidate_pmd:
        pmd_clear(pmdp);
        flush_pmd_entry(pmdp);
 out:
        spin_unlock(&mm->page_table_lock);
}

/*
 * TLB operations
 */
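/*
 * The MMU_LOCK register holds a "base" and a "victim" index: entries
 * below base are locked against eviction, and victim selects the entry
 * that the next MMU_LD_TLB command loads.
 */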
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
        return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
                                  struct cam_ram_regset *cr)
{
        return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
        unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
        int mask;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
        tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
        tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
        omap_mmu_write_reg(mmu,
                           (lock->base << MMU_LOCK_BASE_SHIFT) |
                           (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
                       struct cam_ram_regset *cr)
{
        /* set victim */
        omap_mmu_set_tlb_lock(mmu, lock);

        if (likely(mmu->ops->read_tlb))
                mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        if (likely(mmu->ops->load_tlb))
                mmu->ops->load_tlb(mmu, cr);

        /* flush the entry */
        omap_mmu_flush(mmu);

        /* load a TLB entry */
        omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
                            struct omap_mmu_tlb_entry *entry)
{
        struct omap_mmu_tlb_lock lock;
        struct cam_ram_regset *cr;
        int ret;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
                struct cam_ram_regset tmp;

                /* read a TLB entry */
                omap_mmu_read_tlb(mmu, &lock, &tmp);
                if (!omap_mmu_cam_ram_valid(mmu, &tmp))
                        goto found_victim;
        }
        omap_mmu_set_tlb_lock(mmu, &lock);

found_victim:
        /* The last entry cannot be locked? */
        if (lock.victim == (mmu->nr_tlb_entries - 1)) {
                printk(KERN_ERR "MMU: TLB is full.\n");
                ret = -EBUSY;
                goto out;
        }

        cr = omap_mmu_cam_ram_alloc(mmu, entry);
        if (IS_ERR(cr)) {
                ret = PTR_ERR(cr);
                goto out;
        }

        omap_mmu_load_tlb(mmu, cr);
        kfree(cr);

        /* update lock base */
        if (lock.victim == lock.base)
                lock.base++;

        omap_mmu_set_tlb_lock(mmu, &lock);

        ret = 0;
out:
        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
        struct omap_mmu_tlb_lock lock;
        int i;
        int max_valid = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (i = 0; i < lock.base; i++) {
                struct cam_ram_regset cr;

                /* read a TLB entry */
                lock.victim = i;
                omap_mmu_read_tlb(mmu, &lock, &cr);
                if (!omap_mmu_cam_ram_valid(mmu, &cr))
                        continue;

                if (omap_mmu_cam_va(mmu, &cr) == vadr)
                        /* flush the entry */
                        omap_mmu_flush(mmu);
                else
                        max_valid = i;
        }

        /* set new lock base */
        lock.base = lock.victim = max_valid + 1;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock lock;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
        lock.base = lock.victim = mmu->nr_exmap_preserved;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
                            struct omap_mmu_tlb_entry *entry)
{
        int ret = -1;
        if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
                /* XXX use PG_flag for prsvd */
                ret = omap_mmu_load_pte(mmu, entry);
                if (ret)
                        return ret;
        }
        if (entry->tlb)
                ret = omap_mmu_load_tlb_entry(mmu, entry);
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
        int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
        if (ret)
                return ret;
        if (mmu->ops->pte_get_attr)
                omap_mmu_clear_pte(mmu, vadr);
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.
 * In that case the buffer for the DSP is allocated here and then
 * mapped.
 * Callers such as frame buffer sharing instead pass a non-zero padr,
 * meaning that the known physical region starting at padr is to be
 * shared with the DSP.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
                   unsigned long padr, unsigned long size,
                   enum exmap_type type)
{
        unsigned long pgsz;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;
        int prev = -1;
        unsigned long _dspadr = dspadr;
        unsigned long _padr = padr;
        void *_vadr = omap_mmu_to_virt(mmu, dspadr);
        unsigned long _size = size;
        struct omap_mmu_tlb_entry tlb_ent;
        struct exmap_tbl *exmap_ent, *tmp_ent;
        int status;
        int idx;

#define MINIMUM_PAGESZ  SZ_4K
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
                return -EINVAL;
        }
        if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: physical address(0x%lx) is not aligned.\n",
                       padr);
                return -EINVAL;
        }

        /* address validity check */
        if ((dspadr < mmu->memsize) ||
            (dspadr >= (1 << mmu->addrspace))) {
                printk(KERN_ERR
                       "MMU: illegal address/size for %s().\n",
                       __FUNCTION__);
                return -EINVAL;
        }

        down_write(&mmu->exmap_sem);

        /* overlap check */
        omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
                unsigned long mapsize;

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        printk(KERN_ERR "MMU: exmap page overlap!\n");
                        up_write(&mmu->exmap_sem);
                        return -EINVAL;
                }
        }

start:
        buf = NULL;
        /* Are there any free TLB lines?  */
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
                if (!mmu->exmap_tbl[idx].valid)
                        goto found_free;

        printk(KERN_ERR "MMU: DSP TLB is full.\n");
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = mmu->exmap_tbl + idx;

        if ((_size >= SZ_1M) &&
            (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
            is_aligned(_dspadr, SZ_1M)) {
                unit = SZ_1M;
                pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
        } else if ((_size >= SZ_64K) &&
                   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64K)) {
                unit = SZ_64K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
        } else {
                unit = SZ_4K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
        }

        order = get_order(unit);

        /* buffer allocation */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                if ((order == ORDER_1MB) && likely(mempool_1M))
                        buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
                else if ((order == ORDER_64KB) && likely(mempool_64K))
                        buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
                else {
                        buf = (void *)__get_dma_pages(GFP_KERNEL, order);
                        if (buf == NULL) {
                                status = -ENOMEM;
                                goto fail;
                        }
                }

                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);

                for (page = ps; page < pe; page++)
                        SetPageReserved(page);

                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we should not access the allocated memory through 'buf',
         * since this area must not be cached.
         */
        status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading DSP PTE entry */
        INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
        status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
        if (status < 0) {
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
        }

        INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
        exmap_ent->link.prev = prev;
        if (prev >= 0)
                mmu->exmap_tbl[prev].link.next = idx;

        if ((_size -= unit) == 0) {     /* normal completion */
                up_write(&mmu->exmap_sem);
                return size;
        }

        _dspadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        prev = idx;
        goto start;

fail:
        up_write(&mmu->exmap_sem);
        if (buf)
                omap_mmu_free_pages((unsigned long)buf, order);
        omap_mmu_exunmap(mmu, dspadr);
        return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
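
/*
 * Example (a sketch; the DSP address and size are arbitrary): allocate
 * and map 1MB of kernel memory at DSP address 0x300000, then release
 * it again later:
 *
 *	ret = omap_mmu_exmap(mmu, 0x300000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	omap_mmu_exunmap(mmu, 0x300000);
 */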

static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
        unsigned long size;

        /* clearing ARM MMU */
        size = 1 << (ent->order + PAGE_SHIFT);
        exmap_clear_armmmu((unsigned long)ent->vadr, size);

        /* freeing allocated memory */
        if (ent->type == EXMAP_TYPE_MEM) {
                omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
                printk(KERN_DEBUG
                       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
                       size, ent->buf);
        }

        ent->valid = 0;
        return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
        void *vadr;
        unsigned long size;
        int total = 0;
        struct exmap_tbl *ent;
        int idx;

        vadr = omap_mmu_to_virt(mmu, dspadr);
        down_write(&mmu->exmap_sem);
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
                ent = mmu->exmap_tbl + idx;
                if (!ent->valid || ent->prsvd)
                        continue;
                if (ent->vadr == vadr)
                        goto found_map;
        }
        up_write(&mmu->exmap_sem);
        printk(KERN_WARNING
               "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
        return -EINVAL;

found_map:
        if (ent->usecount > 0) {
                printk(KERN_ERR
                       "MMU: exmap reference count is not 0.\n"
                       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
                       idx, ent->vadr, ent->order, ent->usecount);
                up_write(&mmu->exmap_sem);
                return -EINVAL;
        }
        /* clearing DSP PTE entry */
        omap_mmu_clear_pte_entry(mmu, dspadr);

        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(ent);
        total += size;

        /* we don't free PTEs */

        /* flush TLB */
        flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

        /* check if next mapping is in same group */
        idx = ent->link.next;
        if (idx < 0)
                goto up_out;    /* normal completion */
        ent = mmu->exmap_tbl + idx;
        dspadr += size;
        vadr   += size;
        if (ent->vadr == vadr)
                goto found_map; /* continue */

        printk(KERN_ERR
               "MMU: illegal exmap_tbl grouping!\n"
               "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
               vadr, idx, ent->vadr);
        up_write(&mmu->exmap_sem);
        return -EINVAL;

up_out:
        up_write(&mmu->exmap_sem);
        return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);

        /* clearing TLB entry */
        omap_mmu_gflush(mmu);

        omap_mmu_for_each_tlb_entry(mmu, ent)
                if (ent->valid && !ent->prsvd)
                        unmap_free_arm(ent);

        /* flush TLB */
        if (likely(mmu->membase))
                flush_tlb_kernel_range(mmu->membase + mmu->memsize,
                                       mmu->membase + (1 << mmu->addrspace));

        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
                                    unsigned long dspadr, int index)
{
        unsigned long phys;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        phys = __pa(buf);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
        INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
        omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
        void *virt = omap_mmu_to_virt(mmu, dspadr);

        exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
        /* the DSP MMU side is shut down elsewhere; it is not handled here */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

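/* Soft-reset the MMU and poll MMU_SYSSTATUS until its reset-done bit is set. */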
static void omap_mmu_reset(struct omap_mmu *mmu)
{
        int i;

        omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

        for (i = 0; i < 10000; i++)
                if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
                        break;
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

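/*
 * When the backend supplies pte_get_attr(), hardware table walking
 * (TWL) is enabled as well, with MMU_TTB pointing at the ARM-format
 * page table rooted at twl_mm->pgd.
 */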
void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
        u32 val = MMU_CNTL_MMUENABLE;
        u32 pa = (u32)virt_to_phys(mmu->twl_mm->pgd);

        if (likely(reset))
                omap_mmu_reset(mmu);

        if (mmu->ops->pte_get_attr) {
                omap_mmu_write_reg(mmu, pa, MMU_TTB);
                val |= MMU_CNTL_TWLENABLE;
        }

        omap_mmu_write_reg(mmu, val, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
        struct omap_mmu *mmu = dev_id;

        if (likely(mmu->ops->interrupt))
                mmu->ops->interrupt(mmu);

        return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();
        down_write(&mmu->exmap_sem);

        ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
                          mmu->name, mmu);
        if (ret < 0) {
                printk(KERN_ERR
                       "failed to register MMU interrupt: %d\n", ret);
                goto fail;
        }

        omap_mmu_disable(mmu);  /* clear all */
        udelay(100);
        omap_mmu_enable(mmu, 1);

        memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        if (unlikely(mmu->ops->startup))
                ret = mmu->ops->startup(mmu);
 fail:
        up_write(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
        free_irq(mmu->irq, mmu);

        if (unlikely(mmu->ops->shutdown))
                mmu->ops->shutdown(mmu);

        omap_mmu_exmap_flush(mmu);
        omap_mmu_disable(mmu); /* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_enable))
                return mmu->ops->mem_enable(mmu, addr);

        down_read(&mmu->exmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_disable)) {
                mmu->ops->mem_disable(mmu, addr);
                return;
        }

        up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t read;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        read = count;
        if (count > size - p)
                read = size - p;
        if (copy_to_user(buf, vadr, read)) {
                read = -EFAULT;
                goto out;
        }
        *ppos += read;
out:
        clk_disable(mmu->memclk);
        return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                          loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                printk(KERN_ERR
                       "MMU: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_to_user(buf, vadr, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
                                 loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_read(mmu, buf, count, &offset);
        else
                ret = exmem_read(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
                            loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t written;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        written = count;
        if (count > size - p)
                written = size - p;
        if (copy_from_user(vadr, buf, written)) {
                written = -EFAULT;
                goto out;
        }
        *ppos += written;
out:
        clk_disable(mmu->memclk);
        return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                printk(KERN_ERR
                       "MMU: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_from_user(vadr, buf, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
                                  loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_write(mmu, buf, count, &offset);
        else
                ret = exmem_write(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static struct bin_attribute dev_attr_mem = {
        .attr   = {
                .name   = "mem",
                .owner  = THIS_MODULE,
                .mode   = S_IRUSR | S_IWUSR | S_IRGRP,
        },

        .read   = omap_mmu_mem_read,
        .write  = omap_mmu_mem_write,
};
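
/*
 * User space can then dump DSP memory through the "mem" binary sysfs
 * attribute, e.g. (the exact path depends on the registered device
 * name; "dsp" here is only an example):
 *
 *	dd if=/sys/class/mmu/dsp/mem of=dump.bin bs=4k count=16
 */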

/* Kept only for backward compatibility; these wrappers are to be removed. */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
                            loff_t offset, size_t count)
{
        return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
                             loff_t offset, size_t count)
{
        return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = -EIO;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        down_read(&mmu->exmap_sem);

        omap_mmu_get_tlb_lock(mmu, &tlb_lock);

        if (likely(mmu->ops->show))
                ret = mmu->ops->show(mmu, buf, &tlb_lock);

        /* restore victim entry */
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        up_read(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct exmap_tbl *ent;
        int len;
        int i = 0;

        down_read(&mmu->exmap_sem);
        len = sprintf(buf, "  dspadr     size         buf     size uc\n");
                         /* 0x300000 0x123000  0xc0171000 0x100000  0 */

        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *vadr;
                unsigned long size;
                enum exmap_type type;
                int idx;

                /* find the head of a link chain */
                if (!ent->valid || (ent->link.prev >= 0))
                        continue;

                vadr = ent->vadr;
                type = ent->type;
                size = 0;
                idx = i;
                do {
                        ent = mmu->exmap_tbl + idx;
                        size += PAGE_SIZE << ent->order;
                } while ((idx = ent->link.next) >= 0);

                len += sprintf(buf + len, "0x%06lx %#8lx",
                               virt_to_omap_mmu(mmu, vadr), size);

                if (type == EXMAP_TYPE_FB) {
                        len += sprintf(buf + len, "    framebuf\n");
                } else {
                        len += sprintf(buf + len, "\n");
                        idx = i;
                        do {
                                ent = mmu->exmap_tbl + idx;
                                len += sprintf(buf + len,
                                               /* 0xc0171000 0x100000  0 */
                                               "%19s0x%8p %#8lx %2d\n",
                                               "", ent->buf,
                                               PAGE_SIZE << ent->order,
                                               ent->usecount);
                        } while ((idx = ent->link.next) >= 0);
                }

                i++;
        }

        up_read(&mmu->exmap_sem);
        return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
                           const char *buf,
                           size_t count)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long base = 0, len = 0;
        int ret;

        sscanf(buf, "%lx %lx", &base, &len);

        if (!base)
                return -EINVAL;

        if (len) {
                /* Add the mapping */
                ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
                if (ret < 0)
                        return ret;
        } else {
                /* Remove the mapping */
                ret = omap_mmu_exunmap(mmu, base);
                if (ret < 0)
                        return ret;
        }

        return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
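
/*
 * Writing "<dspadr> <size>" (both hex) adds an anonymous mapping and
 * writing just "<dspadr>" removes it again, e.g. (the sysfs path
 * depends on the device name):
 *
 *	echo "300000 100000" > /sys/class/mmu/dsp/exmap
 *	echo "300000" > /sys/class/mmu/dsp/exmap
 */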

static ssize_t mempool_show(struct class *class, char *buf)
{
        int min_nr_1M = 0, curr_nr_1M = 0;
        int min_nr_64K = 0, curr_nr_64K = 0;
        int total = 0;

        if (likely(mempool_1M)) {
                min_nr_1M  = mempool_1M->min_nr;
                curr_nr_1M = mempool_1M->curr_nr;
                total += min_nr_1M * SZ_1M;
        }
        if (likely(mempool_64K)) {
                min_nr_64K  = mempool_64K->min_nr;
                curr_nr_64K = mempool_64K->curr_nr;
                total += min_nr_64K * SZ_64K;
        }

        return sprintf(buf,
                       "0x%x\n"
                       "1M  buffer: %d (%d free)\n"
                       "64K buffer: %d (%d free)\n",
                       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
        .name           = "mmu",
        .dev_release    = omap_mmu_class_dev_release,
};

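/*
 * omap_mmu_register() assumes the caller has already filled in the
 * fields used below: at least ->name, ->ops, ->irq, ->clk and
 * ->nr_tlb_entries, plus ->membase/->memsize for MMUs with internal
 * memory.
 */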
int omap_mmu_register(struct omap_mmu *mmu)
{
        int ret;

        mmu->dev.class = &omap_mmu_class;
        strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
        dev_set_drvdata(&mmu->dev, mmu);

        mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
                                 GFP_KERNEL);
        if (!mmu->exmap_tbl)
                return -ENOMEM;

        if (mmu->ops->pte_get_attr) {
                struct mm_struct *mm = mm_alloc();
                if (!mm) {
                        ret = -ENOMEM;
                        goto err_mm_alloc;
                }
                mmu->twl_mm = mm;
        }

        ret = device_register(&mmu->dev);
        if (unlikely(ret))
                goto err_dev_register;

        init_rwsem(&mmu->exmap_sem);

        ret = omap_mmu_read_reg(mmu, MMU_REVISION);
        printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
               mmu->name, (ret >> 4) & 0xf, ret & 0xf);

        ret = omap_mmu_init(mmu);
        if (unlikely(ret))
                goto err_mmu_init;

        ret = device_create_file(&mmu->dev, &dev_attr_mmu);
        if (unlikely(ret))
                goto err_dev_create_mmu;
        ret = device_create_file(&mmu->dev, &dev_attr_exmap);
        if (unlikely(ret))
                goto err_dev_create_exmap;

        if (likely(mmu->membase)) {
                dev_attr_mem.size = mmu->memsize;
                ret = device_create_bin_file(&mmu->dev, &dev_attr_mem);
                if (unlikely(ret))
                        goto err_bin_create_mem;
        }

        return 0;

err_bin_create_mem:
        device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
        device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
        omap_mmu_shutdown(mmu);
err_mmu_init:
        device_unregister(&mmu->dev);
err_dev_register:
        if (mmu->twl_mm) {
                __mmdrop(mmu->twl_mm);
                mmu->twl_mm = NULL;
        }
err_mm_alloc:
        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);

void omap_mmu_unregister(struct omap_mmu *mmu)
{
        omap_mmu_shutdown(mmu);
        omap_mmu_kmem_release();

        device_remove_file(&mmu->dev, &dev_attr_mmu);
        device_remove_file(&mmu->dev, &dev_attr_exmap);

        if (likely(mmu->membase))
                device_remove_bin_file(&mmu->dev, &dev_attr_mem);

        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;

        if (mmu->ops->pte_get_attr) {
                if (mmu->twl_mm) {
                        __mmdrop(mmu->twl_mm);
                        mmu->twl_mm = NULL;
                }
        }

        device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
        int ret = class_register(&omap_mmu_class);
        if (!ret)
                ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

        return ret;
}

static void __exit omap_mmu_class_exit(void)
{
        class_remove_file(&omap_mmu_class, &class_attr_mempool);
        class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");