/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <mach/mmu.h>
#include <asm/sizes.h>
#include <mach/dsp_common.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK applies only to the IVA and DSP MMUs; the
 * camera MMU implements the base and victim fields in different bits of
 * the LOCK register (the shifts are the same).  All other registers are
 * identical across the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT		10
#define MMU_LOCK_VICTIM_SHIFT		4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr, align)	(!((adr)&((align)-1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE	(1<<3)
#define MMU_CNTL_TWLENABLE	(1<<2)
#define MMU_CNTL_MMUENABLE	(1<<1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)			\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),	\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);	\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)

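/*
 * Grab an element directly from the pool's reserve when one is
 * available.  Plain mempool_alloc() tries the backing page allocator
 * first and falls back to the reserve only under memory pressure,
 * which would defeat the point of the blocks preallocated by
 * omap_mmu_kmem_reserve() below.
 */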
static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may request physically contiguous 1MB or 64KB blocks, which
 * are hard to obtain once memory becomes fragmented.  Users can
 * therefore reserve such blocks ahead of time, early in boot, through
 * kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		dev_err(mmu->dev,
			"MMU %s: size(0x%lx) is not a multiple of 64KB.\n",
			mmu->name, size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		dev_err(mmu->dev,
			"MMU %s: size(0x%lx) is larger than the external "
			"device memory space size (0x%x).\n", mmu->name,
			size, (1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size >= SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		/* keep only the sub-1MB remainder */
		size &= (SZ_1M - 1);
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		/* keep only the sub-64KB remainder */
		size &= (SZ_64K - 1);
	}

	if (size)
		len -= size;

	return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
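
/*
 * A minimal usage sketch (hedged: "dsp_mmu" and the sizes are invented
 * for illustration, not taken from this file).  A driver expecting to
 * exmap() two 1MB buffers and one 64KB buffer would reserve them early,
 * before physical memory fragments:
 *
 *	unsigned long got;
 *
 *	got = omap_mmu_kmem_reserve(dsp_mmu, 2 * SZ_1M + SZ_64K);
 *	// 'got' is the number of bytes actually set aside
 */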

void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(struct omap_mmu *mmu, unsigned long virt,
		     unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	dev_dbg(mmu->dev,
		"MMU %s: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
		mmu->name, virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}
	BUG_ON(sz_left);

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(struct omap_mmu *mmu, unsigned long virt,
			unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	dev_dbg(mmu->dev,
		"MMU %s: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
		mmu->name, virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
	BUG_ON(sz_left);
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);
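
/*
 * Hedged pairing sketch: these two helpers are normally driven from
 * omap_mmu_exmap()/omap_mmu_exunmap() below, mapping a range into the
 * ARM side first and tearing it down afterwards ('va', 'pa' and the
 * 64KB unit are illustrative values only):
 *
 *	if (exmap_set_armmmu(mmu, va, pa, SZ_64K) < 0)
 *		return -ENOMEM;
 *	...
 *	exmap_clear_armmmu(mmu, va, SZ_64K);
 */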

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers the whole range */
				return 1;
			} else {
				/*
				 * this map covers the range only
				 * partially; check the rest.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * When the mapped area is exported to user space with mmap, the
 * usecount is incremented.  While the usecount is > 0, the area
 * cannot be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
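
/*
 * Hedged sketch of the intended mmap pairing (foo_vm_open() and the
 * 'priv' container are hypothetical, not part of this file): a driver
 * exporting an exmap'ed area to user space would bump the usecount
 * from its vma open() handler and drop it again in close():
 *
 *	static void foo_vm_open(struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = vma->vm_private_data;
 *
 *		omap_mmu_exmap_use(priv->mmu, priv->vadr, priv->len);
 *	}
 *
 * with a matching omap_mmu_exmap_unuse() call in foo_vm_close().
 */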

/*
 * omap_mmu_virt_to_phys()
 * returns physical address, and sets len to valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	/* EXRAM */
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
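
/*
 * Hedged usage sketch: an exmap area may be stitched together from
 * several physically discontiguous blocks, so callers are expected to
 * walk it chunk by chunk (the variables here are illustrative):
 *
 *	size_t left = total, chunk;
 *	void *va = vadr;
 *
 *	while (left) {
 *		unsigned long pa = omap_mmu_virt_to_phys(mmu, va, &chunk);
 *		if (!pa)
 *			break;			// no valid mapping here
 *		chunk = min(chunk, left);
 *		// ... operate on [pa, pa + chunk) ...
 *		va += chunk;
 *		left -= chunk;
 *	}
 */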

/*
 * PTE operations
 */
static inline void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
		       unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
	if (virt & (1 << SECTION_SHIFT))
		pmdp++;
	*pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
	flush_pmd_entry(pmdp);
}

static inline void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
			    unsigned long phys, int prot)
{
	int i;
	for (i = 0; i < 16; i++) {
		omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}

static inline int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
		    unsigned long phys, pgprot_t prot)
{
	pte_t *ptep;
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (!(prot & PTE_TYPE_MASK))
		prot |= PTE_TYPE_SMALL;

	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(mm, virt);
		if (ptep == NULL)
			return -ENOMEM;
		pmd_populate_kernel(mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, virt);
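	/*
	 * Added note (our reading of this code): pte_offset_kernel()
	 * returns the Linux copy of the PTE, which this era of ARM
	 * keeps PTRS_PER_PTE entries after the hardware table, so step
	 * back to write the hardware entry that the external MMU's
	 * table walker actually reads.
	 */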
	ptep -= PTRS_PER_PTE;
	*ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
	flush_pmd_entry((pmd_t *)ptep);
	return 0;
}

static inline int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
			 unsigned long phys, pgprot_t prot)
{
	int i, ret;
	for (i = 0; i < 16; i++) {
		ret = omap_mmu_alloc_page(mm, virt, phys,
					  prot | PTE_TYPE_LARGE);
		if (ret)
			return -ENOMEM; /* can only fail on the 1st page */
		virt += PAGE_SIZE;
	}
	return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
			     struct omap_mmu_tlb_entry *e)
{
	int ret = 0;
	struct mm_struct *mm = mmu->twl_mm;
	const unsigned long va = e->va;
	const unsigned long pa = e->pa;
	const pgprot_t prot = mmu->ops->pte_get_attr(e);

	spin_lock(&mm->page_table_lock);

	switch (e->pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_16MB:
		omap_mmu_alloc_supersection(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		omap_mmu_alloc_section(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		ret = omap_mmu_alloc_page(mm, va, pa, prot);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
	pte_t *ptep, *end;
	pmd_t *pmdp;
	struct mm_struct *mm = mmu->twl_mm;

	spin_lock(&mm->page_table_lock);

	pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (pmd_none(*pmdp))
		goto out;

	if (!pmd_table(*pmdp))
		goto invalidate_pmd;

	ptep = pte_offset_kernel(pmdp, virt);
	pte_clear(mm, virt, ptep);
	flush_pmd_entry((pmd_t *)ptep);

	/* zap pte */
	end = pmd_page_vaddr(*pmdp);
	ptep = end - PTRS_PER_PTE;
	while (ptep < end) {
		if (!pte_none(*ptep))
			goto out;
		ptep++;
	}
	pte_free_kernel(mm, pmd_page_vaddr(*pmdp));

 invalidate_pmd:
	pmd_clear(pmdp);
	flush_pmd_entry(pmdp);
 out:
	spin_unlock(&mm->page_table_lock);
}

/*
 * TLB operations
 */
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(mmu, entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, OMAP_MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT),
			   OMAP_MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set victim */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			goto found_victim;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

found_victim:
	/* the last entry must stay unlocked */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		dev_err(mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
		ret = -EBUSY;
		goto release;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr)) {
		ret = PTR_ERR(cr);
		goto release;
	}

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);
	ret = 0;

release:
	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i, ret = 0;
	int max_valid = 0;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	int ret;

	/* XXX: use a PG_ flag for prsvd */
	ret = omap_mmu_load_pte(mmu, entry);
	if (ret)
		return ret;
	if (entry->tlb)
		ret = omap_mmu_load_tlb_entry(mmu, entry);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);
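
/*
 * Hedged sketch of loading one mapping through the PTE + TLB path
 * (INIT_TLB_ENTRY() comes from the mach-level mmu.h; the variables
 * are invented for illustration):
 *
 *	struct omap_mmu_tlb_entry ent;
 *
 *	INIT_TLB_ENTRY(&ent, devadr, padr, OMAP_MMU_CAM_PAGESIZE_4KB);
 *	if (omap_mmu_load_pte_entry(mmu, &ent) < 0)
 *		goto fail;
 *	...
 *	omap_mmu_clear_pte_entry(mmu, devadr);
 */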

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
	if (ret)
		return ret;
	omap_mmu_clear_pte(mmu, vadr);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * The MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.  In that
 * case the buffer for the external device is allocated here, then
 * mapped.  Alternatively (e.g. for frame buffer sharing) it is called
 * with padr set, meaning that a known physical address region is to be
 * shared with the external device.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long devadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _devadr = devadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, devadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		dev_err(mmu->dev,
			"MMU %s: size(0x%lx) is not a multiple of 4KB.\n",
			mmu->name, size);
		return -EINVAL;
	}
	if (!is_aligned(devadr, MINIMUM_PAGESZ)) {
		dev_err(mmu->dev,
			"MMU %s: external device address(0x%lx) is not"
			" aligned.\n", mmu->name, devadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		dev_err(mmu->dev,
			"MMU %s: physical address(0x%lx) is not aligned.\n",
			mmu->name, padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((devadr < mmu->memsize) ||
	    (devadr >= (1 << mmu->addrspace))) {
		dev_err(mmu->dev,
			"MMU %s: illegal address/size for %s().\n",
			mmu->name, __func__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			dev_err(mmu->dev, "MMU %s: exmap page overlap!\n",
				mmu->name);
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines?  */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	dev_err(mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	/* pick the largest unit that still fits alignment and size */
	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_devadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_devadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else {
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
			if (buf == NULL) {
				status = -ENOMEM;
				goto fail;
			}
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we must not access the allocated memory through 'buf',
	 * since this area must not be cached.
	 */
	status = exmap_set_armmmu(mmu, (unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading external device PTE entry */
	INIT_TLB_ENTRY(&tlb_ent, _devadr, _padr, pgsz);
	status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu(mmu, (unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_devadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, devadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
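
/*
 * Usage sketch, mirroring what the "exmap" sysfs store below does
 * (the device address 0x300000 and SZ_1M are invented examples):
 *
 *	if (omap_mmu_exmap(mmu, 0x300000, 0, SZ_1M, EXMAP_TYPE_MEM) < 0)
 *		goto fail;
 *	...
 *	omap_mmu_exunmap(mmu, 0x300000);
 *
 * With padr == 0 the backing memory is allocated here (from the
 * mempools when reserved); a nonzero padr shares an existing physical
 * region, e.g. a frame buffer, instead.
 */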

static unsigned long unmap_free_arm(struct omap_mmu *mmu,
				    struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu(mmu, (unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		dev_dbg(mmu->dev, "MMU %s: freeing 0x%lx bytes @ adr 0x%8p\n",
			mmu->name, size, ent->buf);
	}

	ent->valid = 0;
	return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long devadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, devadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	dev_warn(mmu->dev, "MMU %s: address %06lx not found in exmap_tbl.\n",
		 mmu->name, devadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		dev_err(mmu->dev, "MMU %s: exmap reference count is not 0.\n"
			"   idx=%d, vadr=%p, order=%d, usecount=%d\n",
			mmu->name, idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing external device PTE entry */
	omap_mmu_clear_pte_entry(mmu, devadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(mmu, ent);
	total += size;

	/* we don't free PTEs */

	/* flush TLB */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	devadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	dev_err(mmu->dev, "MMU %s: illegal exmap_tbl grouping!\n"
		"expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
		mmu->name, vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entry */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(mmu, ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long devadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, devadr);
	exmap_set_armmmu(mmu, (unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, devadr, phys);
	omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long devadr)
{
	void *virt = omap_mmu_to_virt(mmu, devadr);

	exmap_clear_armmmu(mmu, (unsigned long)virt, PAGE_SIZE);
	/* DSP MMU is shutting down. not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
#if defined(CONFIG_ARCH_OMAP2) /* FIXME */
	int i;

	omap_mmu_write_reg(mmu, 0x2, OMAP_MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, OMAP_MMU_SYSSTATUS) & 0x1))
			break;
#endif
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	u32 val = OMAP_MMU_CNTL_MMU_EN | MMU_CNTL_TWLENABLE;

	if (likely(reset))
		omap_mmu_reset(mmu);
#if defined(CONFIG_ARCH_OMAP2) /* FIXME */
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd),
			   OMAP_MMU_TTB);
#else
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) & 0xffff,
			   OMAP_MMU_TTB_L);
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) >> 16,
			   OMAP_MMU_TTB_H);
	val |= OMAP_MMU_CNTL_RESET_SW;
#endif
	omap_mmu_write_reg(mmu, val, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		dev_err(mmu->dev, "MMU %s: failed to register MMU interrupt:"
			" %d\n", mmu->name, ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */
	udelay(100);
	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup)) {
		ret = mmu->ops->startup(mmu);
		if (unlikely(ret))
			free_irq(mmu->irq, mmu);
	}
fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);

	return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu); /* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		dev_err(mmu->dev, "MMU %s: external device address %08lx / "
			"size %08x is not valid!\n", mmu->name, p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	int ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		dev_err(mmu->dev, "MMU %s: external device address %08lx "
			"/ size %08x is not valid!\n", mmu->name, p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj,
				  struct bin_attribute *attr,
				  char *buf, loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	int ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};

/* kept only for backward compatibility; to be removed */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu,
			    struct bin_attribute *attr,
			    char *buf, loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev->kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu,
			     struct bin_attribute *attr,
			     char *buf, loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev->kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	ret = -EIO;
	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	int len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  devadr     size         buf     size uc\n");
			 /* 0x300000 0x123000  0xc0171000 0x100000  0*/

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx, top;

		/* find a top of link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		size = 0;
		top = ent - mmu->exmap_tbl;	/* index of this entry */
		idx = top;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
		} else {
			len += sprintf(buf + len, "\n");
			idx = top;
			do {
				ent = mmu->exmap_tbl + idx;
				len += sprintf(buf + len,
					       /* 0xc0171000 0x100000  0*/
					       "%19s0x%8p %#8lx %2d\n",
					       "", ent->buf,
					       PAGE_SIZE << ent->order,
					       ent->usecount);
			} while ((idx = ent->link.next) >= 0);
		}
	}

	up_read(&mmu->exmap_sem);
	return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
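
/*
 * The "exmap" attribute can also be driven from user space; assuming
 * the device lands under /sys/class/mmu/<name> (the exact path depends
 * on the sysfs layout of the running kernel), something like:
 *
 *	# map 0x100000 bytes at external device address 0x300000
 *	echo "300000 100000" > /sys/class/mmu/<name>/exmap
 *	# unmap again (a missing/zero length means unmap)
 *	echo "300000" > /sys/class/mmu/<name>/exmap
 */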

static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M  = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K  = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "0x%x\n"
		       "1M  buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static struct class omap_mmu_class = {
	.name		= "mmu",
};

int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev = device_create(&omap_mmu_class, NULL, 0, "%s", mmu->name);
	if (unlikely(IS_ERR(mmu->dev)))
		return PTR_ERR(mmu->dev);
	dev_set_drvdata(mmu->dev, mmu);

	mmu->exmap_tbl = kcalloc(mmu->nr_tlb_entries, sizeof(struct exmap_tbl),
				 GFP_KERNEL);
	if (!mmu->exmap_tbl) {
		ret = -ENOMEM;
		goto err_exmap_alloc;
	}

	mmu->twl_mm = mm_alloc();
	if (!mmu->twl_mm) {
		ret = -ENOMEM;
		goto err_mm_alloc;
	}

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}
	return 0;

err_bin_create_mem:
	device_remove_file(mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	__mmdrop(mmu->twl_mm);
	mmu->twl_mm = NULL;
err_mm_alloc:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
err_exmap_alloc:
	device_unregister(mmu->dev);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);
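
/*
 * Hedged registration sketch.  The field names below are the ones this
 * file dereferences; the values and the ops table name are invented,
 * and the real struct definition lives in <mach/mmu.h>:
 *
 *	static struct omap_mmu dsp_mmu = {
 *		.name		= "dsp",
 *		.type		= OMAP_MMU_DSP,	// hypothetical enum value
 *		.nr_tlb_entries	= 32,
 *		.addrspace	= 24,
 *		.irq		= INT_DSP_MMU,	// hypothetical IRQ name
 *		.ops		= &dsp_mmu_ops,
 *	};
 *
 *	ret = omap_mmu_register(&dsp_mmu);
 */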

void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(mmu->dev, &dev_attr_mmu);
	device_remove_file(mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(mmu->dev, &dev_attr_mem);

	device_unregister(mmu->dev);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	if (mmu->twl_mm) {
		__mmdrop(mmu->twl_mm);
		mmu->twl_mm = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");