/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>		/* kzalloc()/kfree() */
#include <linux/sched.h>	/* init_mm, mm_alloc(), __mmdrop() */
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>	/* flush_tlb_kernel_range() */
#include <asm/arch/mmu.h>
#include <asm/sizes.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2 the MMU_LOCK_xxx_MASK definitions only apply to the IVA and
 * DSP MMUs; the camera MMU implements base and victim in different bits
 * of the LOCK register (the shifts are the same).  All of the other
 * registers are identical across all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT		10
#define MMU_LOCK_VICTIM_SHIFT		4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)

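/*
 * In the LOCK register, 'base' is the number of low TLB entries that
 * are locked down (the replacement logic never evicts them), while
 * 'victim' selects the entry that the next TLB read-back or LD_TLB
 * load operates on.
 */
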
#define is_aligned(adr, align)	(!((adr) & ((align) - 1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE	(1 << 3)
#define MMU_CNTL_TWLENABLE	(1 << 2)
#define MMU_CNTL_MMUENABLE	(1 << 1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)			\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),	\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);	\
	     entry++)

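/*
 * Typical iteration over the external mapping table (the pattern used
 * throughout this file); invalid slots are skipped by the caller:
 *
 *	struct exmap_tbl *ent;
 *
 *	omap_mmu_for_each_tlb_entry(mmu, ent) {
 *		if (!ent->valid)
 *			continue;
 *		...
 *	}
 */
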
#define to_dev(obj)	container_of(obj, struct device, kobj)

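/*
 * Grab an element directly from the pool's preallocated reserve when
 * one is available, and only fall back to mempool_alloc() (which tries
 * the underlying allocator first) when the reserve is empty.  This
 * inverts the usual mempool ordering so that the blocks set aside by
 * kmem_reserve() are actually the ones handed out.
 */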
static void *mempool_alloc_from_pool(mempool_t *pool, gfp_t gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may need physically contiguous 1MB or 64KB blocks, which
 * are hard to come by once memory pages have become fragmented.  Users
 * can therefore reserve such memory blocks early on through
 * kmem_reserve().
 */
static void *omap_mmu_pool_alloc(gfp_t gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not a multiple of 64KB.\n",
		       size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is larger than DSP memory space "
		       "size (0x%x).\n", size, (1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size >= SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		/* keep only the remainder below 1MB */
		size &= SZ_1M - 1;
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		/* keep only the remainder below 64KB */
		size &= SZ_64K - 1;
	}

	if (size)
		len -= size;

	return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
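
/*
 * Usage sketch (hypothetical sizes): a driver that will later exmap()
 * up to 2MB plus 128KB can pin those blocks down early, before memory
 * fragments:
 *
 *	omap_mmu_kmem_reserve(mmu, 2 * SZ_1M + 2 * SZ_64K);
 *
 * The return value is the length actually backed by the 1MB and 64KB
 * pools; sizes that are not a multiple of 64KB are rejected up front
 * by the alignment check.
 */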

void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	printk(KERN_DEBUG
	       "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
	       virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	/* carry the phys-virt offset so that (virt + off) tracks phys */
	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}
	BUG_ON(sz_left);

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	printk(KERN_DEBUG
	       "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
	       virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
	BUG_ON(sz_left);
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers the whole range */
				return 1;
			} else {
				/*
				 * this map covers the range only
				 * partially; rescan for the rest.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * When the mapped area is exported to user space with mmap, the
 * usecount is incremented.  While the usecount is > 0, that area
 * can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns the physical address and sets *len to the valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	/* EXRAM */
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);

/*
 * PTE operations
 */
static inline void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
		       unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	/* each pgd entry covers two 1MB sections; step to the odd one */
	if (virt & (1 << SECTION_SHIFT))
		pmdp++;
	*pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
	flush_pmd_entry(pmdp);
}

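/*
 * An ARM supersection maps 16MB and must be replicated across 16
 * consecutive first-level entries: the same PMD_SECT_SUPER-tagged
 * section descriptor is written at sixteen successive 1MB slots below.
 */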
static inline void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
			    unsigned long phys, int prot)
{
	int i;
	for (i = 0; i < 16; i++) {
		omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}

static inline int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
		    unsigned long phys, pgprot_t prot)
{
	pte_t *ptep;
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (!(prot & PTE_TYPE_MASK))
		prot |= PTE_TYPE_SMALL;

	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(mm, virt);
		if (ptep == NULL)
			return -ENOMEM;
		pmd_populate_kernel(mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, virt);
	/*
	 * pte_offset_kernel() returns the Linux view of the PTE; the
	 * hardware table that the external MMU walks sits PTRS_PER_PTE
	 * entries below it, so write the entry there.
	 */
	ptep -= PTRS_PER_PTE;
	*ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
	flush_pmd_entry((pmd_t *)ptep);
	return 0;
}

static inline int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
			 unsigned long phys, pgprot_t prot)
{
	int i, ret;

	/*
	 * a 64KB large-page descriptor must be replicated in 16
	 * consecutive PTEs; 'phys' deliberately stays at the
	 * (64KB-aligned) base for every copy
	 */
	for (i = 0; i < 16; i++) {
		ret = omap_mmu_alloc_page(mm, virt, phys,
					  prot | PTE_TYPE_LARGE);
		if (ret)
			return -ENOMEM; /* clean only if the 1st one fails */
		virt += PAGE_SIZE;
	}
	return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
			     struct omap_mmu_tlb_entry *e)
{
	int ret = 0;
	struct mm_struct *mm = mmu->twl_mm;
	const unsigned long va = e->va;
	const unsigned long pa = e->pa;
	const pgprot_t prot = mmu->ops->pte_get_attr(e);

	spin_lock(&mm->page_table_lock);

	switch (e->pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_16MB:
		omap_mmu_alloc_supersection(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		omap_mmu_alloc_section(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		ret = omap_mmu_alloc_page(mm, va, pa, prot);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
	pte_t *ptep, *end;
	pmd_t *pmdp;
	struct mm_struct *mm = mmu->twl_mm;

	spin_lock(&mm->page_table_lock);

	pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (pmd_none(*pmdp))
		goto out;

	if (!pmd_table(*pmdp))
		goto invalidate_pmd;

	ptep = pte_offset_kernel(pmdp, virt);
	pte_clear(mm, virt, ptep);
	flush_pmd_entry((pmd_t *)ptep);

	/* zap the pte page if every hardware entry is now clear */
	end = pmd_page_vaddr(*pmdp);
	ptep = end - PTRS_PER_PTE;
	while (ptep < end) {
		if (!pte_none(*ptep))
			goto out;
		ptep++;
	}
	pte_free_kernel(pmd_page_vaddr(*pmdp));

 invalidate_pmd:
	pmd_clear(pmdp);
	flush_pmd_entry(pmdp);
 out:
	spin_unlock(&mm->page_table_lock);
}

/*
 * TLB operations
 */
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set victim */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;
	int ret;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			goto found_victim;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

found_victim:
	/* the last entry must remain unlocked */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		printk(KERN_ERR "MMU: TLB is full.\n");
		ret = -EBUSY;
		goto fail;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr)) {
		ret = PTR_ERR(cr);
		goto fail;
	}

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);

	ret = 0;
fail:
	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set the new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	int ret = -1;

	if (!entry->prsvd && mmu->ops->pte_get_attr) {
		/* XXX use PG_flag for prsvd */
		ret = omap_mmu_load_pte(mmu, entry);
		if (ret)
			return ret;
	}
	if (entry->tlb)
		ret = omap_mmu_load_tlb_entry(mmu, entry);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
	if (ret)
		return ret;
	if (mmu->ops->pte_get_attr)
		omap_mmu_clear_pte(mmu, vadr);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.
 * In this case, the buffer for the DSP is allocated in this routine,
 * then it is mapped.
 * On the other hand, frame buffer sharing, for example, calls this
 * function with padr set.  That means a known physical region starting
 * at padr is going to be shared with the DSP.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _dspadr = dspadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, dspadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: size(0x%lx) is not a multiple of 4KB.\n", size);
		return -EINVAL;
	}
	if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: physical address(0x%lx) is not aligned.\n",
		       padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((dspadr < mmu->memsize) ||
	    (dspadr >= (1 << mmu->addrspace))) {
		printk(KERN_ERR
		       "MMU: illegal address/size for %s().\n",
		       __func__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			printk(KERN_ERR "MMU: exmap page overlap!\n");
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* are there any free TLB lines? */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	printk(KERN_ERR "MMU: DSP TLB is full.\n");
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	/*
	 * pick the largest page size that both the remaining length
	 * and the alignment of the addresses allow
	 */
	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_dspadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_dspadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else {
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
			if (buf == NULL) {
				status = -ENOMEM;
				goto fail;
			}
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access the allocated memory through 'buf',
	 * since this area must not be cached.
	 */
	status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading DSP PTE entry */
	INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
	status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu((unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_dspadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, dspadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
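
/*
 * A usage sketch: mapping 1MB of freshly allocated kernel memory at a
 * (hypothetical) DSP address 0x200000, then tearing it down again:
 *
 *	err = omap_mmu_exmap(mmu, 0x200000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *	...
 *	err = omap_mmu_exunmap(mmu, 0x200000);
 *
 * With padr == 0 the backing buffer comes from the kernel (from the
 * mempool reserves, if available); a non-zero padr shares existing
 * physical memory, e.g. a frame buffer, instead.
 */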

static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu((unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		printk(KERN_DEBUG
		       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
		       size, ent->buf);
	}

	ent->valid = 0;
	return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, dspadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	printk(KERN_WARNING
	       "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "MMU: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing DSP PTE entry */
	omap_mmu_clear_pte_entry(mmu, dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	total += size;

	/* we don't free PTEs */

	/* flush TLB */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if the next mapping is in the same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	dspadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	printk(KERN_ERR
	       "MMU: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entries */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long dspadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, dspadr);
	exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
	omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *virt = omap_mmu_to_virt(mmu, dspadr);

	exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
	/* the DSP MMU itself is shutting down; that is not handled here */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
	int i;

	/* issue a soft reset and poll until it completes */
	omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
			break;
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	u32 val = MMU_CNTL_MMUENABLE;
	u32 pa = (u32)virt_to_phys(mmu->twl_mm->pgd);

	if (likely(reset))
		omap_mmu_reset(mmu);

	if (mmu->ops->pte_get_attr) {
		omap_mmu_write_reg(mmu, pa, MMU_TTB);
		val |= MMU_CNTL_TWLENABLE;
	}

	omap_mmu_write_reg(mmu, val, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);
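
/*
 * Note on the table walking logic (TWL): when the per-SoC ops provide
 * pte_get_attr(), omap_mmu_load_pte() mirrors each mapping into an
 * ARM-format page table owned by mmu->twl_mm, and omap_mmu_enable()
 * points the MMU's TTB register at that table and sets
 * MMU_CNTL_TWLENABLE, so the MMU can resolve misses by walking the
 * table in memory instead of relying purely on software-loaded TLB
 * entries.
 */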

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();
	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		printk(KERN_ERR
		       "failed to register MMU interrupt: %d\n", ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */
	udelay(100);
	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);
 fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
				 loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	int ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
				  loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	int ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};

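/*
 * The "mem" binary attribute exposes the DSP address space under
 * /sys/class/mmu/<name>/mem: offsets below mmu->memsize access internal
 * memory, higher offsets go through the exmap mappings and fail with
 * -EFAULT if no valid mapping covers them.
 */
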
/* kept only for backward compatibility; to be made obsolete */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
			    loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
			     loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = -EIO;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore the victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	int len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  dspadr     size         buf     size uc\n");
			 /* 0x300000 0x123000  0xc0171000 0x100000  0 */

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		struct exmap_tbl *p;
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx, head;

		/* find the head of a link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		/*
		 * walk this group through its link indices, starting
		 * from the head's own table index, without disturbing
		 * the outer iteration cursor
		 */
		head = ent - mmu->exmap_tbl;
		vadr = ent->vadr;
		type = ent->type;
		size = 0;
		idx = head;
		do {
			p = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << p->order;
		} while ((idx = p->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
		} else {
			len += sprintf(buf + len, "\n");
			idx = head;
			do {
				p = mmu->exmap_tbl + idx;
				len += sprintf(buf + len,
					       /* 0xc0171000 0x100000  0 */
					       "%19s0x%8p %#8lx %2d\n",
					       "", p->buf,
					       PAGE_SIZE << p->order,
					       p->usecount);
			} while ((idx = p->link.next) >= 0);
		}
	}

	up_read(&mmu->exmap_sem);
	return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf,
			   size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
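
/*
 * Writing "<dspadr> <len>" (both hex) to the exmap attribute maps len
 * bytes of kernel memory at DSP address dspadr; writing just "<dspadr>"
 * removes that mapping again.  Reading the attribute dumps the current
 * mapping table.  For example, with a hypothetical device named "dsp":
 *
 *	echo "200000 100000" > /sys/class/mmu/dsp/exmap
 */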

static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M  = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K  = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "0x%x\n"
		       "1M  buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
	.name		= "mmu",
	.dev_release	= omap_mmu_class_dev_release,
};

int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev.class = &omap_mmu_class;
	strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
	dev_set_drvdata(&mmu->dev, mmu);

	mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		return -ENOMEM;

	if (mmu->ops->pte_get_attr) {
		struct mm_struct *mm = mm_alloc();
		if (!mm) {
			ret = -ENOMEM;
			goto err_mm_alloc;
		}
		mmu->twl_mm = mm;
	}

	ret = device_register(&mmu->dev);
	if (unlikely(ret))
		goto err_dev_register;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_read_reg(mmu, MMU_REVISION);
	printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
	       mmu->name, (ret >> 4) & 0xf, ret & 0xf);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(&mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(&mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(&mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	device_unregister(&mmu->dev);
err_dev_register:
	/* an mm from mm_alloc() must be dropped, not kfree()d */
	if (mmu->twl_mm) {
		__mmdrop(mmu->twl_mm);
		mmu->twl_mm = NULL;
	}
err_mm_alloc:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);
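
/*
 * A registration sketch.  The per-SoC code fills in an omap_mmu and its
 * ops; the field values below are purely illustrative:
 *
 *	static struct omap_mmu dsp_mmu = {
 *		.name		= "dsp",
 *		.nr_tlb_entries	= 32,
 *		.ops		= &dsp_mmu_ops,
 *		...
 *	};
 *
 *	ret = omap_mmu_register(&dsp_mmu);
 *
 * On success this creates /sys/class/mmu/dsp/ with the "mmu", "exmap"
 * and (when membase is set) "mem" attributes.
 */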

void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(&mmu->dev, &dev_attr_mmu);
	device_remove_file(&mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(&mmu->dev, &dev_attr_mem);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	if (mmu->ops->pte_get_attr) {
		if (mmu->twl_mm) {
			__mmdrop(mmu->twl_mm);
			mmu->twl_mm = NULL;
		}
	}

	device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");