1 /*
2  * linux/arch/arm/plat-omap/mmu.c
3  *
4  * OMAP MMU management framework
5  *
6  * Copyright (C) 2002-2006 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *        and Paul Mundt <lethal@linux-sh.org>
10  *
11  * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26  */
27 #include <linux/module.h>
28 #include <linux/mempool.h>
29 #include <linux/init.h>
30 #include <linux/delay.h>
31 #include <linux/err.h>
32 #include <linux/clk.h>
33 #include <linux/device.h>
34 #include <linux/interrupt.h>
35 #include <asm/uaccess.h>
36 #include <asm/io.h>
37 #include <asm/pgalloc.h>
38 #include <asm/pgtable.h>
39 #include <asm/arch/mmu.h>
40 #include <asm/sizes.h>
41
42 #if defined(CONFIG_ARCH_OMAP1)
43 #include "../mach-omap1/mmu.h"
44 #elif defined(CONFIG_ARCH_OMAP2)
45 #include "../mach-omap2/mmu.h"
46 #endif
47
48 /*
49  * On OMAP2, MMU_LOCK_xxx_MASK applies only to the IVA and DSP MMUs; the
50  * camera MMU implements base and victim in different bits of the LOCK
51  * register (the shifts are still the same).  All of the other registers
52  * are the same on all of the MMUs.
53  */
54 #define MMU_LOCK_BASE_SHIFT             10
55 #define MMU_LOCK_VICTIM_SHIFT           4
56
57 #define CAMERA_MMU_LOCK_BASE_MASK       (0x7 << MMU_LOCK_BASE_SHIFT)
58 #define CAMERA_MMU_LOCK_VICTIM_MASK     (0x7 << MMU_LOCK_VICTIM_SHIFT)
59
60 #define is_aligned(adr,align)   (!((adr)&((align)-1)))
61 #define ORDER_1MB       (20 - PAGE_SHIFT)
62 #define ORDER_64KB      (16 - PAGE_SHIFT)
63 #define ORDER_4KB       (12 - PAGE_SHIFT)
64
65 #define MMU_CNTL_EMUTLBUPDATE   (1<<3)
66 #define MMU_CNTL_TWLENABLE      (1<<2)
67 #define MMU_CNTL_MMUENABLE      (1<<1)
68
69 static mempool_t *mempool_1M;
70 static mempool_t *mempool_64K;
71
72 #define omap_mmu_for_each_tlb_entry(mmu, entry)                 \
73         for (entry = mmu->exmap_tbl; prefetch(entry + 1),       \
74              entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);    \
75              entry++)
76
77 #define to_dev(obj)     container_of(obj, struct device, kobj)
78
79 static void *mempool_alloc_from_pool(mempool_t *pool,
80                                      unsigned int __nocast gfp_mask)
81 {
82         spin_lock_irq(&pool->lock);
83         if (likely(pool->curr_nr)) {
84                 void *element = pool->elements[--pool->curr_nr];
85                 spin_unlock_irq(&pool->lock);
86                 return element;
87         }
88
89         spin_unlock_irq(&pool->lock);
90         return mempool_alloc(pool, gfp_mask);
91 }
92
93 /*
94  * kmem_reserve(), kmem_release():
95  * reserve or release kernel memory for exmap().
96  *
97  * exmap() may need physically contiguous 1MB or 64KB blocks, which are
98  * hard to obtain once memory has become fragmented.  Callers can
99  * therefore reserve such blocks early on through kmem_reserve()
100  * (an illustrative sketch follows omap_mmu_kmem_reserve() below).
101  */
102 static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
103 {
104         return (void *)__get_dma_pages(gfp, (unsigned int)order);
105 }
106
107 static void omap_mmu_pool_free(void *buf, void *order)
108 {
109         free_pages((unsigned long)buf, (unsigned int)order);
110 }
111
112 int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
113 {
114         unsigned long len = size;
115
116         /* alignment check */
117         if (!is_aligned(size, SZ_64K)) {
118                 printk(KERN_ERR
119                        "omapdsp: size(0x%lx) is not a multiple of 64KB.\n", size);
120                 return -EINVAL;
121         }
122
123         if (size > (1 << mmu->addrspace)) {
124                 printk(KERN_ERR
125                        "omapdsp: size(0x%lx) is larger than DSP memory space "
126                        "size (0x%x).\n", size, (1 << mmu->addrspace));
127                 return -EINVAL;
128         }
129
130         if (size >= SZ_1M) {
131                 int nr = size >> 20;
132
133                 if (likely(!mempool_1M))
134                         mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
135                                                     omap_mmu_pool_free,
136                                                     (void *)ORDER_1MB);
137                 else
138                         mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
139                                        GFP_KERNEL);
140
141                 size &= ~(0xf << 20);
142         }
143
144         if (size >= SZ_64K) {
145                 int nr = size >> 16;
146
147                 if (likely(!mempool_64K))
148                         mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
149                                                      omap_mmu_pool_free,
150                                                      (void *)ORDER_64KB);
151                 else
152                         mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
153                                        GFP_KERNEL);
154
155                 size &= ~(0xf << 16);
156         }
157
158         if (size)
159                 len -= size;
160
161         return len;
162 }
163 EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
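/*
 * Illustrative use of the reservation helpers -- a hypothetical caller,
 * not part of this file; 'mmu' is assumed to be an already set-up
 * struct omap_mmu.  Two 1MB blocks and four 64KB blocks are reserved
 * early, before physical memory becomes fragmented:
 *
 *	int reserved;
 *
 *	reserved = omap_mmu_kmem_reserve(mmu, 2 * SZ_1M + 4 * SZ_64K);
 *	if (reserved < 0)
 *		return reserved;
 *
 * On teardown, omap_mmu_kmem_release() destroys both mempools again.
 */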
164
165 void omap_mmu_kmem_release(void)
166 {
167         if (mempool_64K) {
168                 mempool_destroy(mempool_64K);
169                 mempool_64K = NULL;
170         }
171
172         if (mempool_1M) {
173                 mempool_destroy(mempool_1M);
174                 mempool_1M = NULL;
175         }
176 }
177 EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);
178
179 static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
180 {
181         struct page *page, *ps, *pe;
182
183         ps = virt_to_page(buf);
184         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
185
186         for (page = ps; page < pe; page++)
187                 ClearPageReserved(page);
188
189         if ((order == ORDER_64KB) && likely(mempool_64K))
190                 mempool_free((void *)buf, mempool_64K);
191         else if ((order == ORDER_1MB) && likely(mempool_1M))
192                 mempool_free((void *)buf, mempool_1M);
193         else
194                 free_pages(buf, order);
195 }
196
197 /*
198  * ARM MMU operations
199  */
200 int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
201 {
202         long off;
203         unsigned long sz_left;
204         pmd_t *pmdp;
205         pte_t *ptep;
206         int prot_pmd, prot_pte;
207
208         printk(KERN_DEBUG
209                "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
210                virt, phys, size);
211
212         prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
213         prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;
214
215         pmdp = pmd_offset(pgd_offset_k(virt), virt);
216         if (pmd_none(*pmdp)) {
217                 ptep = pte_alloc_one_kernel(&init_mm, 0);
218                 if (ptep == NULL)
219                         return -ENOMEM;
220                 /* note: two PMDs will be set  */
221                 pmd_populate_kernel(&init_mm, pmdp, ptep);
222         }
223
224         off = phys - virt;
225         for (sz_left = size;
226              sz_left >= PAGE_SIZE;
227              sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
228                 ptep = pte_offset_kernel(pmdp, virt);
229                 set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
230         }
231         if (sz_left)
232                 BUG();
233
234         return 0;
235 }
236 EXPORT_SYMBOL_GPL(exmap_set_armmmu);
237
238 void exmap_clear_armmmu(unsigned long virt, unsigned long size)
239 {
240         unsigned long sz_left;
241         pmd_t *pmdp;
242         pte_t *ptep;
243
244         printk(KERN_DEBUG
245                "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
246                virt, size);
247
248         for (sz_left = size;
249              sz_left >= PAGE_SIZE;
250              sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
251                 pmdp = pmd_offset(pgd_offset_k(virt), virt);
252                 ptep = pte_offset_kernel(pmdp, virt);
253                 pte_clear(&init_mm, virt, ptep);
254         }
255         if (sz_left)
256                 BUG();
257 }
258 EXPORT_SYMBOL_GPL(exmap_clear_armmmu);
259
260 int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
261 {
262         /* exmap_sem should be held before calling this function */
263         struct exmap_tbl *ent;
264
265 start:
266         omap_mmu_for_each_tlb_entry(mmu, ent) {
267                 void *mapadr;
268                 unsigned long mapsize;
269
270                 if (!ent->valid)
271                         continue;
272                 mapadr = (void *)ent->vadr;
273                 mapsize = 1 << (ent->order + PAGE_SHIFT);
274                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
275                         if (vadr + len <= mapadr + mapsize) {
276                                 /* this map covers the whole range. */
277                                 return 1;
278                         } else {
279                                 /*
280                                  * this map covers the range only
281                                  * partially; check the rest.
282                                  */
283                                 len -= mapadr + mapsize - vadr;
284                                 vadr = mapadr + mapsize;
285                                 goto start;
286                         }
287                 }
288         }
289
290         return 0;
291 }
292 EXPORT_SYMBOL_GPL(exmap_valid);
293
294 /*
295  * omap_mmu_exmap_use(), unuse():
296  * When a mapped area is exported to user space with mmap(), its
297  * usecount is incremented; while the usecount is > 0, that area
298  * cannot be released.  (A sketch follows omap_mmu_exmap_unuse().)
299  */
300 void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
301 {
302         struct exmap_tbl *ent;
303
304         down_write(&mmu->exmap_sem);
305         omap_mmu_for_each_tlb_entry(mmu, ent) {
306                 void *mapadr;
307                 unsigned long mapsize;
308
309                 if (!ent->valid)
310                         continue;
311                 mapadr = (void *)ent->vadr;
312                 mapsize = 1 << (ent->order + PAGE_SHIFT);
313                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
314                         ent->usecount++;
315         }
316         up_write(&mmu->exmap_sem);
317 }
318 EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);
319
320 void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
321 {
322         struct exmap_tbl *ent;
323
324         down_write(&mmu->exmap_sem);
325         omap_mmu_for_each_tlb_entry(mmu, ent) {
326                 void *mapadr;
327                 unsigned long mapsize;
328
329                 if (!ent->valid)
330                         continue;
331                 mapadr = (void *)ent->vadr;
332                 mapsize = 1 << (ent->order + PAGE_SHIFT);
333                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
334                         ent->usecount--;
335         }
336         up_write(&mmu->exmap_sem);
337 }
338 EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
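/*
 * Illustrative pairing of the refcount helpers -- hypothetical mmap glue,
 * not part of this file; 'mmu', 'vadr' and 'len' are assumed to be
 * supplied by the caller.  A driver exporting an exmap'ed region would
 * typically call, from its mmap() path:
 *
 *	omap_mmu_exmap_use(mmu, vadr, len);
 *
 * and, when the user mapping is torn down again:
 *
 *	omap_mmu_exmap_unuse(mmu, vadr, len);
 *
 * so that omap_mmu_exunmap() refuses to release the area in between.
 */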
339
340 /*
341  * omap_mmu_virt_to_phys(): returns the physical address and sets
342  * *len to the contiguously valid length (0 if no mapping is found).
343  */
344 unsigned long
345 omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
346 {
347         struct exmap_tbl *ent;
348
349         if (omap_mmu_internal_memory(mmu, vadr)) {
350                 unsigned long addr = (unsigned long)vadr;
351                 *len = mmu->membase + mmu->memsize - addr;
352                 return addr;
353         }
354
355         /* EXRAM */
356         omap_mmu_for_each_tlb_entry(mmu, ent) {
357                 void *mapadr;
358                 unsigned long mapsize;
359
360                 if (!ent->valid)
361                         continue;
362                 mapadr = (void *)ent->vadr;
363                 mapsize = 1 << (ent->order + PAGE_SHIFT);
364                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
365                         *len = mapadr + mapsize - vadr;
366                         return __pa(ent->buf) + vadr - mapadr;
367                 }
368         }
369
370         /* valid mapping not found */
371         return 0;
372 }
373 EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
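/*
 * Illustrative use of omap_mmu_virt_to_phys() -- hypothetical caller, not
 * part of this file; 'mmu', 'vadr' and 'count' are assumed to come from
 * the caller.  Translate a DSP-side virtual address and clamp an access
 * to the length that the translation actually covers:
 *
 *	size_t valid;
 *	unsigned long pa = omap_mmu_virt_to_phys(mmu, vadr, &valid);
 *
 *	if (!pa)
 *		return -EFAULT;
 *	if (count > valid)
 *		count = valid;
 */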
374
375 /*
376  * PTE operations
377  */
378 static inline void
379 omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
380                        unsigned long phys, int prot)
381 {
382         pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
383         if (virt & (1 << SECTION_SHIFT))
384                 pmdp++;
385         *pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
386         flush_pmd_entry(pmdp);
387 }
388
389 static inline void
390 omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
391                             unsigned long phys, int prot)
392 {
393         int i;
394         for (i = 0; i < 16; i += 1) {
395                 omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
396                 virt += (PGDIR_SIZE / 2);
397         }
398 }
399
400 static inline int
401 omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
402                     unsigned long phys, pgprot_t prot)
403 {
404         pte_t *ptep;
405         pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
406
407         if (!(prot & PTE_TYPE_MASK))
408                 prot |= PTE_TYPE_SMALL;
409
410         if (pmd_none(*pmdp)) {
411                 ptep = pte_alloc_one_kernel(mm, virt);
412                 if (ptep == NULL)
413                         return -ENOMEM;
414                 pmd_populate_kernel(mm, pmdp, ptep);
415         }
416         ptep = pte_offset_kernel(pmdp, virt);
417         ptep -= PTRS_PER_PTE;
418         *ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
419         flush_pmd_entry((pmd_t *)ptep);
420         return 0;
421 }
422
423 static inline int
424 omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
425                          unsigned long phys, pgprot_t prot)
426 {
427         int i, ret;
428         for (i = 0; i < 16; i += 1) {
429                 ret = omap_mmu_alloc_page(mm, virt, phys,
430                                           prot | PTE_TYPE_LARGE);
431                 if (ret)
432                         return -ENOMEM; /* can fail only on the 1st page */
433                 virt += PAGE_SIZE;
434         }
435         return 0;
436 }
437
438 static int omap_mmu_load_pte(struct omap_mmu *mmu,
439                              struct omap_mmu_tlb_entry *e)
440 {
441         int ret = 0;
442         struct mm_struct *mm = mmu->twl_mm;
443         const unsigned long va = e->va;
444         const unsigned long pa = e->pa;
445         const pgprot_t prot = mmu->ops->pte_get_attr(e);
446
447         spin_lock(&mm->page_table_lock);
448
449         switch (e->pgsz) {
450         case OMAP_MMU_CAM_PAGESIZE_16MB:
451                 omap_mmu_alloc_supersection(mm, va, pa, prot);
452                 break;
453         case OMAP_MMU_CAM_PAGESIZE_1MB:
454                 omap_mmu_alloc_section(mm, va, pa, prot);
455                 break;
456         case OMAP_MMU_CAM_PAGESIZE_64KB:
457                 ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
458                 break;
459         case OMAP_MMU_CAM_PAGESIZE_4KB:
460                 ret = omap_mmu_alloc_page(mm, va, pa, prot);
461                 break;
462         default:
463                 BUG();
464                 break;
465         }
466
467         spin_unlock(&mm->page_table_lock);
468
469         return ret;
470 }
471
472 static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
473 {
474         pte_t *ptep, *end;
475         pmd_t *pmdp;
476         struct mm_struct *mm = mmu->twl_mm;
477
478         spin_lock(&mm->page_table_lock);
479
480         pmdp = pmd_offset(pgd_offset(mm, virt), virt);
481
482         if (pmd_none(*pmdp))
483                 goto out;
484
485         if (!pmd_table(*pmdp))
486                 goto invalidate_pmd;
487
488         ptep = pte_offset_kernel(pmdp, virt);
489         pte_clear(mm, virt, ptep);
490         flush_pmd_entry((pmd_t *)ptep);
491
492         /* zap pte */
493         end = pmd_page_vaddr(*pmdp);
494         ptep = end - PTRS_PER_PTE;
495         while (ptep < end) {
496                 if (!pte_none(*ptep))
497                         goto out;
498                 ptep++;
499         }
500         pte_free_kernel(pmd_page_vaddr(*pmdp));
501
502  invalidate_pmd:
503         pmd_clear(pmdp);
504         flush_pmd_entry(pmdp);
505  out:
506         spin_unlock(&mm->page_table_lock);
507 }
508
509 /*
510  * TLB operations
511  */
512 static struct cam_ram_regset *
513 omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
514 {
515         return mmu->ops->cam_ram_alloc(entry);
516 }
517
518 static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
519                                   struct cam_ram_regset *cr)
520 {
521         return mmu->ops->cam_ram_valid(cr);
522 }
523
524 static inline void
525 omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
526 {
527         unsigned long lock = omap_mmu_read_reg(mmu, OMAP_MMU_LOCK);
528         int mask;
529
530         mask = (mmu->type == OMAP_MMU_CAMERA) ?
531                         CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
532         tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;
533
534         mask = (mmu->type == OMAP_MMU_CAMERA) ?
535                         CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
536         tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
537 }
538
539 static inline void
540 omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
541 {
542         omap_mmu_write_reg(mmu,
543                            (lock->base << MMU_LOCK_BASE_SHIFT) |
544                            (lock->victim << MMU_LOCK_VICTIM_SHIFT),
545                            OMAP_MMU_LOCK);
546 }
547
548 static inline void omap_mmu_flush(struct omap_mmu *mmu)
549 {
550         omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_FLUSH_ENTRY);
551 }
552
553 static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
554 {
555         omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_LD_TLB);
556 }
557
558 void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
559                        struct cam_ram_regset *cr)
560 {
561         /* set victim */
562         omap_mmu_set_tlb_lock(mmu, lock);
563
564         if (likely(mmu->ops->read_tlb))
565                 mmu->ops->read_tlb(mmu, cr);
566 }
567 EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);
568
569 void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
570 {
571         if (likely(mmu->ops->load_tlb))
572                 mmu->ops->load_tlb(mmu, cr);
573
574         /* flush the entry */
575         omap_mmu_flush(mmu);
576
577         /* load a TLB entry */
578         omap_mmu_ldtlb(mmu);
579 }
580
581 int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
582                             struct omap_mmu_tlb_entry *entry)
583 {
584         struct omap_mmu_tlb_lock lock;
585         struct cam_ram_regset *cr;
586
587         clk_enable(mmu->clk);
588         omap_dsp_request_mem();
589
590         omap_mmu_get_tlb_lock(mmu, &lock);
591         for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
592                 struct cam_ram_regset tmp;
593
594                 /* read a TLB entry */
595                 omap_mmu_read_tlb(mmu, &lock, &tmp);
596                 if (!omap_mmu_cam_ram_valid(mmu, &tmp))
597                         goto found_victim;
598         }
599         omap_mmu_set_tlb_lock(mmu, &lock);
600
601 found_victim:
602         /* The last entry cannot be locked? */
603         if (lock.victim == (mmu->nr_tlb_entries - 1)) {
604                 printk(KERN_ERR "MMU: TLB is full.\n");
605                 return -EBUSY;
606         }
607
608         cr = omap_mmu_cam_ram_alloc(mmu, entry);
609         if (IS_ERR(cr))
610                 return PTR_ERR(cr);
611
612         omap_mmu_load_tlb(mmu, cr);
613         kfree(cr);
614
615         /* update lock base */
616         if (lock.victim == lock.base)
617                 lock.base++;
618
619         omap_mmu_set_tlb_lock(mmu, &lock);
620
621         omap_dsp_release_mem();
622         clk_disable(mmu->clk);
623         return 0;
624 }
625 EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
626
627 static inline unsigned long
628 omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
629 {
630         return mmu->ops->cam_va(cr);
631 }
632
633 int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
634 {
635         struct omap_mmu_tlb_lock lock;
636         int i;
637         int max_valid = 0;
638
639         clk_enable(mmu->clk);
640         omap_dsp_request_mem();
641
642         omap_mmu_get_tlb_lock(mmu, &lock);
643         for (i = 0; i < lock.base; i++) {
644                 struct cam_ram_regset cr;
645
646                 /* read a TLB entry */
647                 lock.victim = i;
648                 omap_mmu_read_tlb(mmu, &lock, &cr);
649                 if (!omap_mmu_cam_ram_valid(mmu, &cr))
650                         continue;
651
652                 if (omap_mmu_cam_va(mmu, &cr) == vadr)
653                         /* flush the entry */
654                         omap_mmu_flush(mmu);
655                 else
656                         max_valid = i;
657         }
658
659         /* set new lock base */
660         lock.base = lock.victim = max_valid + 1;
661         omap_mmu_set_tlb_lock(mmu, &lock);
662
663         omap_dsp_release_mem();
664         clk_disable(mmu->clk);
665         return 0;
666 }
667 EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);
668
669 static void omap_mmu_gflush(struct omap_mmu *mmu)
670 {
671         struct omap_mmu_tlb_lock lock;
672
673         clk_enable(mmu->clk);
674         omap_dsp_request_mem();
675
676         omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_GFLUSH);
677         lock.base = lock.victim = mmu->nr_exmap_preserved;
678         omap_mmu_set_tlb_lock(mmu, &lock);
679
680         omap_dsp_release_mem();
681         clk_disable(mmu->clk);
682 }
683
684 int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
685                             struct omap_mmu_tlb_entry *entry)
686 {
687         int ret = -1;
688         if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
689                 /*XXX use PG_flag for prsvd */
690                 ret = omap_mmu_load_pte(mmu, entry);
691                 if (ret)
692                         return ret;
693         }
694         if (entry->tlb)
695                 ret = omap_mmu_load_tlb_entry(mmu, entry);
696         return ret;
697 }
698 EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);
699
700 int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
701 {
702         int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
703         if (ret)
704                 return ret;
705         if (mmu->ops->pte_get_attr)
706                 omap_mmu_clear_pte(mmu, vadr);
707         return ret;
708 }
709 EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);
710
711 /*
712  * omap_mmu_exmap()
713  *
714  * The MEM_IOCTL_EXMAP ioctl calls this function with padr = 0; in that
715  * case the buffer for the DSP is allocated in this routine and then
716  * mapped.  Other callers - frame buffer sharing, for example - pass a
717  * non-zero padr, meaning that the known physical region pointed to by
718  * padr is to be shared with the DSP.
719  * (An illustrative sketch follows omap_mmu_exmap() below.)
720  */
721 int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
722                    unsigned long padr, unsigned long size,
723                    enum exmap_type type)
724 {
725         unsigned long pgsz;
726         void *buf;
727         unsigned int order = 0;
728         unsigned long unit;
729         int prev = -1;
730         unsigned long _dspadr = dspadr;
731         unsigned long _padr = padr;
732         void *_vadr = omap_mmu_to_virt(mmu, dspadr);
733         unsigned long _size = size;
734         struct omap_mmu_tlb_entry tlb_ent;
735         struct exmap_tbl *exmap_ent, *tmp_ent;
736         int status;
737         int idx;
738
739 #define MINIMUM_PAGESZ  SZ_4K
740         /*
741          * alignment check
742          */
743         if (!is_aligned(size, MINIMUM_PAGESZ)) {
744                 printk(KERN_ERR
745                        "MMU: size(0x%lx) is not a multiple of 4KB.\n", size);
746                 return -EINVAL;
747         }
748         if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
749                 printk(KERN_ERR
750                        "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
751                 return -EINVAL;
752         }
753         if (!is_aligned(padr, MINIMUM_PAGESZ)) {
754                 printk(KERN_ERR
755                        "MMU: physical address(0x%lx) is not aligned.\n",
756                        padr);
757                 return -EINVAL;
758         }
759
760         /* address validity check */
761         if ((dspadr < mmu->memsize) ||
762             (dspadr >= (1 << mmu->addrspace))) {
763                 printk(KERN_ERR
764                        "MMU: illegal address/size for %s().\n",
765                        __FUNCTION__);
766                 return -EINVAL;
767         }
768
769         down_write(&mmu->exmap_sem);
770
771         /* overlap check */
772         omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
773                 unsigned long mapsize;
774
775                 if (!tmp_ent->valid)
776                         continue;
777                 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
778                 if ((_vadr + size > tmp_ent->vadr) &&
779                     (_vadr < tmp_ent->vadr + mapsize)) {
780                         printk(KERN_ERR "MMU: exmap page overlap!\n");
781                         up_write(&mmu->exmap_sem);
782                         return -EINVAL;
783                 }
784         }
785
786 start:
787         buf = NULL;
788         /* Are there any free TLB lines?  */
789         for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
790                 if (!mmu->exmap_tbl[idx].valid)
791                         goto found_free;
792
793         printk(KERN_ERR "MMU: DSP TLB is full.\n");
794         status = -EBUSY;
795         goto fail;
796
797 found_free:
798         exmap_ent = mmu->exmap_tbl + idx;
799
800         if ((_size >= SZ_1M) &&
801             (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
802             is_aligned(_dspadr, SZ_1M)) {
803                 unit = SZ_1M;
804                 pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
805         } else if ((_size >= SZ_64K) &&
806                    (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
807                    is_aligned(_dspadr, SZ_64K)) {
808                 unit = SZ_64K;
809                 pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
810         } else {
811                 unit = SZ_4K;
812                 pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
813         }
814
815         order = get_order(unit);
816
817         /* buffer allocation */
818         if (type == EXMAP_TYPE_MEM) {
819                 struct page *page, *ps, *pe;
820
821                 if ((order == ORDER_1MB) && likely(mempool_1M))
822                         buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
823                 else if ((order == ORDER_64KB) && likely(mempool_64K))
824                         buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
825                 else {
826                         buf = (void *)__get_dma_pages(GFP_KERNEL, order);
827                         if (buf == NULL) {
828                                 status = -ENOMEM;
829                                 goto fail;
830                         }
831                 }
832
833                 /* mark the pages as reserved; this is needed for mmap */
834                 ps = virt_to_page(buf);
835                 pe = virt_to_page(buf + unit);
836
837                 for (page = ps; page < pe; page++)
838                         SetPageReserved(page);
839
840                 _padr = __pa(buf);
841         }
842
843         /*
844          * mapping for ARM MMU:
845          * we must not access the allocated memory through 'buf',
846          * since this area should not be cached.
847          */
848         status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
849         if (status < 0)
850                 goto fail;
851
852         /* loading DSP PTE entry */
853         INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
854         status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
855         if (status < 0) {
856                 exmap_clear_armmmu((unsigned long)_vadr, unit);
857                 goto fail;
858         }
859
860         INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
861         exmap_ent->link.prev = prev;
862         if (prev >= 0)
863                 mmu->exmap_tbl[prev].link.next = idx;
864
865         if ((_size -= unit) == 0) {     /* normal completion */
866                 up_write(&mmu->exmap_sem);
867                 return size;
868         }
869
870         _dspadr += unit;
871         _vadr   += unit;
872         _padr = padr ? _padr + unit : 0;
873         prev = idx;
874         goto start;
875
876 fail:
877         up_write(&mmu->exmap_sem);
878         if (buf)
879                 omap_mmu_free_pages((unsigned long)buf, order);
880         omap_mmu_exunmap(mmu, dspadr);
881         return status;
882 }
883 EXPORT_SYMBOL_GPL(omap_mmu_exmap);
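/*
 * Illustrative sketch of the exmap interface -- hypothetical caller, not
 * part of this file; 'mmu' and the DSP address 0x200000 are assumptions.
 * Allocate and map 1MB of kernel memory at a DSP address, then tear the
 * mapping down again with omap_mmu_exunmap():
 *
 *	ret = omap_mmu_exmap(mmu, 0x200000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	omap_mmu_exunmap(mmu, 0x200000);
 *
 * Passing a non-zero padr instead maps an existing physical region (a
 * frame buffer, for instance) without allocating a new buffer.
 */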
884
885 static unsigned long unmap_free_arm(struct exmap_tbl *ent)
886 {
887         unsigned long size;
888
889         /* clearing ARM MMU */
890         size = 1 << (ent->order + PAGE_SHIFT);
891         exmap_clear_armmmu((unsigned long)ent->vadr, size);
892
893         /* freeing allocated memory */
894         if (ent->type == EXMAP_TYPE_MEM) {
895                 omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
896                 printk(KERN_DEBUG
897                        "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
898                        size, ent->buf);
899         }
900
901         ent->valid = 0;
902         return size;
903 }
904
905 int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
906 {
907         void *vadr;
908         unsigned long size;
909         int total = 0;
910         struct exmap_tbl *ent;
911         int idx;
912
913         vadr = omap_mmu_to_virt(mmu, dspadr);
914         down_write(&mmu->exmap_sem);
915         for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
916                 ent = mmu->exmap_tbl + idx;
917                 if (!ent->valid || ent->prsvd)
918                         continue;
919                 if (ent->vadr == vadr)
920                         goto found_map;
921         }
922         up_write(&mmu->exmap_sem);
923         printk(KERN_WARNING
924                "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
925         return -EINVAL;
926
927 found_map:
928         if (ent->usecount > 0) {
929                 printk(KERN_ERR
930                        "MMU: exmap reference count is not 0.\n"
931                        "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
932                        idx, ent->vadr, ent->order, ent->usecount);
933                 up_write(&mmu->exmap_sem);
934                 return -EINVAL;
935         }
936         /* clearing DSP PTE entry */
937         omap_mmu_clear_pte_entry(mmu, dspadr);
938
939         /* clear ARM MMU and free buffer */
940         size = unmap_free_arm(ent);
941         total += size;
942
943         /* we don't free PTEs */
944
945         /* flush TLB */
946         flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
947
948         /* check if next mapping is in same group */
949         idx = ent->link.next;
950         if (idx < 0)
951                 goto up_out;    /* normal completion */
952         ent = mmu->exmap_tbl + idx;
953         dspadr += size;
954         vadr   += size;
955         if (ent->vadr == vadr)
956                 goto found_map; /* continue */
957
958         printk(KERN_ERR
959                "MMU: illegal exmap_tbl grouping!\n"
960                "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
961                vadr, idx, ent->vadr);
962         up_write(&mmu->exmap_sem);
963         return -EINVAL;
964
965 up_out:
966         up_write(&mmu->exmap_sem);
967         return total;
968 }
969 EXPORT_SYMBOL_GPL(omap_mmu_exunmap);
970
971 void omap_mmu_exmap_flush(struct omap_mmu *mmu)
972 {
973         struct exmap_tbl *ent;
974
975         down_write(&mmu->exmap_sem);
976
977         /* clearing TLB entry */
978         omap_mmu_gflush(mmu);
979
980         omap_mmu_for_each_tlb_entry(mmu, ent)
981                 if (ent->valid && !ent->prsvd)
982                         unmap_free_arm(ent);
983
984         /* flush TLB */
985         if (likely(mmu->membase))
986                 flush_tlb_kernel_range(mmu->membase + mmu->memsize,
987                                        mmu->membase + (1 << mmu->addrspace));
988
989         up_write(&mmu->exmap_sem);
990 }
991 EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);
992
993 void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
994                                     unsigned long dspadr, int index)
995 {
996         unsigned long phys;
997         void *virt;
998         struct omap_mmu_tlb_entry tlb_ent;
999
1000         phys = __pa(buf);
1001         virt = omap_mmu_to_virt(mmu, dspadr);
1002         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1003         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
1004         INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1005         omap_mmu_load_pte_entry(mmu, &tlb_ent);
1006 }
1007 EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
1008
1009 void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
1010 {
1011         void *virt = omap_mmu_to_virt(mmu, dspadr);
1012
1013         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1014         /* The DSP MMU is shutting down; nothing more to do here. */
1015 }
1016 EXPORT_SYMBOL_GPL(exmap_clear_mem_page);
1017
1018 static void omap_mmu_reset(struct omap_mmu *mmu)
1019 {
1020 #if defined(CONFIG_ARCH_OMAP2) /* FIXME */
1021         int i;
1022
1023         omap_mmu_write_reg(mmu, 0x2, OMAP_MMU_SYSCONFIG);
1024
1025         for (i = 0; i < 10000; i++)
1026                 if (likely(omap_mmu_read_reg(mmu, OMAP_MMU_SYSSTATUS) & 0x1))
1027                         break;
1028 #endif
1029 }
1030
1031 void omap_mmu_disable(struct omap_mmu *mmu)
1032 {
1033         omap_mmu_write_reg(mmu, 0x00, OMAP_MMU_CNTL);
1034 }
1035 EXPORT_SYMBOL_GPL(omap_mmu_disable);
1036
1037 void omap_mmu_enable(struct omap_mmu *mmu, int reset)
1038 {
1039         u32 val = OMAP_MMU_CNTL_MMU_EN;
1040
1041         if (likely(reset))
1042                 omap_mmu_reset(mmu);
1043 #if defined(CONFIG_ARCH_OMAP2) /* FIXME */
1044         if (mmu->ops->pte_get_attr) {
1045                 omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd),
1046                                    OMAP_MMU_TTB);
1047                 val |= MMU_CNTL_TWLENABLE;
1048         }
1049 #else
1050         val |= OMAP_MMU_CNTL_RESET_SW;
1051 #endif
1052         omap_mmu_write_reg(mmu, val, OMAP_MMU_CNTL);
1053 }
1054 EXPORT_SYMBOL_GPL(omap_mmu_enable);
1055
1056 static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
1057 {
1058         struct omap_mmu *mmu = dev_id;
1059
1060         if (likely(mmu->ops->interrupt))
1061                 mmu->ops->interrupt(mmu);
1062
1063         return IRQ_HANDLED;
1064 }
1065
1066 static int omap_mmu_init(struct omap_mmu *mmu)
1067 {
1068         struct omap_mmu_tlb_lock tlb_lock;
1069         int ret = 0;
1070
1071         clk_enable(mmu->clk);
1072         omap_dsp_request_mem();
1073         down_write(&mmu->exmap_sem);
1074
1075         ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
1076                           mmu->name,  mmu);
1077         if (ret < 0) {
1078                 printk(KERN_ERR
1079                        "failed to register MMU interrupt: %d\n", ret);
1080                 goto fail;
1081         }
1082
1083         omap_mmu_disable(mmu);  /* clear all */
1084         udelay(100);
1085         omap_mmu_enable(mmu, 1);
1086
1087         memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
1088         omap_mmu_set_tlb_lock(mmu, &tlb_lock);
1089
1090         if (unlikely(mmu->ops->startup))
1091                 ret = mmu->ops->startup(mmu);
1092  fail:
1093         up_write(&mmu->exmap_sem);
1094         omap_dsp_release_mem();
1095         clk_disable(mmu->clk);
1096
1097         return ret;
1098 }
1099
1100 static void omap_mmu_shutdown(struct omap_mmu *mmu)
1101 {
1102         free_irq(mmu->irq, mmu);
1103
1104         if (unlikely(mmu->ops->shutdown))
1105                 mmu->ops->shutdown(mmu);
1106
1107         omap_mmu_exmap_flush(mmu);
1108         omap_mmu_disable(mmu); /* clear all */
1109 }
1110
1111 /*
1112  * omap_mmu_mem_enable() / disable()
1113  */
1114 int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
1115 {
1116         if (unlikely(mmu->ops->mem_enable))
1117                 return mmu->ops->mem_enable(mmu, addr);
1118
1119         down_read(&mmu->exmap_sem);
1120         return 0;
1121 }
1122 EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);
1123
1124 void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
1125 {
1126         if (unlikely(mmu->ops->mem_disable)) {
1127                 mmu->ops->mem_disable(mmu, addr);
1128                 return;
1129         }
1130
1131         up_read(&mmu->exmap_sem);
1132 }
1133 EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
1134
1135 /*
1136  * dsp_mem file operations
1137  */
1138 static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
1139                            loff_t *ppos)
1140 {
1141         unsigned long p = *ppos;
1142         void *vadr = omap_mmu_to_virt(mmu, p);
1143         ssize_t size = mmu->memsize;
1144         ssize_t read;
1145
1146         if (p >= size)
1147                 return 0;
1148         clk_enable(mmu->memclk);
1149         read = count;
1150         if (count > size - p)
1151                 read = size - p;
1152         if (copy_to_user(buf, vadr, read)) {
1153                 read = -EFAULT;
1154                 goto out;
1155         }
1156         *ppos += read;
1157 out:
1158         clk_disable(mmu->memclk);
1159         return read;
1160 }
1161
1162 static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
1163                           loff_t *ppos)
1164 {
1165         unsigned long p = *ppos;
1166         void *vadr = omap_mmu_to_virt(mmu, p);
1167
1168         if (!exmap_valid(mmu, vadr, count)) {
1169                 printk(KERN_ERR
1170                        "MMU: DSP address %08lx / size %08x "
1171                        "is not valid!\n", p, count);
1172                 return -EFAULT;
1173         }
1174         if (count > (1 << mmu->addrspace) - p)
1175                 count = (1 << mmu->addrspace) - p;
1176         if (copy_to_user(buf, vadr, count))
1177                 return -EFAULT;
1178         *ppos += count;
1179
1180         return count;
1181 }
1182
1183 static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
1184                                  loff_t offset, size_t count)
1185 {
1186         struct device *dev = to_dev(kobj);
1187         struct omap_mmu *mmu = dev_get_drvdata(dev);
1188         unsigned long p = (unsigned long)offset;
1189         void *vadr = omap_mmu_to_virt(mmu, p);
1190         int ret;
1191
1192         if (omap_mmu_mem_enable(mmu, vadr) < 0)
1193                 return -EBUSY;
1194
1195         if (p < mmu->memsize)
1196                 ret = intmem_read(mmu, buf, count, &offset);
1197         else
1198                 ret = exmem_read(mmu, buf, count, &offset);
1199
1200         omap_mmu_mem_disable(mmu, vadr);
1201
1202         return ret;
1203 }
1204
1205 static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
1206                             loff_t *ppos)
1207 {
1208         unsigned long p = *ppos;
1209         void *vadr = omap_mmu_to_virt(mmu, p);
1210         ssize_t size = mmu->memsize;
1211         ssize_t written;
1212
1213         if (p >= size)
1214                 return 0;
1215         clk_enable(mmu->memclk);
1216         written = count;
1217         if (count > size - p)
1218                 written = size - p;
1219         if (copy_from_user(vadr, buf, written)) {
1220                 written = -EFAULT;
1221                 goto out;
1222         }
1223         *ppos += written;
1224 out:
1225         clk_disable(mmu->memclk);
1226         return written;
1227 }
1228
1229 static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
1230                            loff_t *ppos)
1231 {
1232         unsigned long p = *ppos;
1233         void *vadr = omap_mmu_to_virt(mmu, p);
1234
1235         if (!exmap_valid(mmu, vadr, count)) {
1236                 printk(KERN_ERR
1237                        "MMU: DSP address %08lx / size %08x "
1238                        "is not valid!\n", p, count);
1239                 return -EFAULT;
1240         }
1241         if (count > (1 << mmu->addrspace) - p)
1242                 count = (1 << mmu->addrspace) - p;
1243         if (copy_from_user(vadr, buf, count))
1244                 return -EFAULT;
1245         *ppos += count;
1246
1247         return count;
1248 }
1249
1250 static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
1251                                   loff_t offset, size_t count)
1252 {
1253         struct device *dev = to_dev(kobj);
1254         struct omap_mmu *mmu = dev_get_drvdata(dev);
1255         unsigned long p = (unsigned long)offset;
1256         void *vadr = omap_mmu_to_virt(mmu, p);
1257         int ret;
1258
1259         if (omap_mmu_mem_enable(mmu, vadr) < 0)
1260                 return -EBUSY;
1261
1262         if (p < mmu->memsize)
1263                 ret = intmem_write(mmu, buf, count, &offset);
1264         else
1265                 ret = exmem_write(mmu, buf, count, &offset);
1266
1267         omap_mmu_mem_disable(mmu, vadr);
1268
1269         return ret;
1270 }
1271
1272 static struct bin_attribute dev_attr_mem = {
1273         .attr   = {
1274                 .name   = "mem",
1275                 .owner  = THIS_MODULE,
1276                 .mode   = S_IRUSR | S_IWUSR | S_IRGRP,
1277         },
1278
1279         .read   = omap_mmu_mem_read,
1280         .write  = omap_mmu_mem_write,
1281 };
1282
1283 /* Kept for backward compatibility only; these will become obsolete. */
1284 ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
1285                             loff_t offset, size_t count)
1286 {
1287         return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
1288 }
1289 EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);
1290
1291 ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
1292                              loff_t offset, size_t count)
1293 {
1294         return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
1295 }
1296 EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);
1297
1298 /*
1299  * sysfs files
1300  */
1301 static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
1302                              char *buf)
1303 {
1304         struct omap_mmu *mmu = dev_get_drvdata(dev);
1305         struct omap_mmu_tlb_lock tlb_lock;
1306         int ret = -EIO;
1307
1308         clk_enable(mmu->clk);
1309         omap_dsp_request_mem();
1310
1311         down_read(&mmu->exmap_sem);
1312
1313         omap_mmu_get_tlb_lock(mmu, &tlb_lock);
1314
1315         if (likely(mmu->ops->show))
1316                 ret = mmu->ops->show(mmu, buf, &tlb_lock);
1317
1318         /* restore victim entry */
1319         omap_mmu_set_tlb_lock(mmu, &tlb_lock);
1320
1321         up_read(&mmu->exmap_sem);
1322         omap_dsp_release_mem();
1323         clk_disable(mmu->clk);
1324
1325         return ret;
1326 }
1327
1328 static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);
1329
1330 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
1331                           char *buf)
1332 {
1333         struct omap_mmu *mmu = dev_get_drvdata(dev);
1334         struct exmap_tbl *ent;
1335         int len;
1336         int i = 0;
1337
1338         down_read(&mmu->exmap_sem);
1339         len = sprintf(buf, "  dspadr     size         buf     size uc\n");
1340                          /* 0x300000 0x123000  0xc0171000 0x100000  0*/
1341
1342         omap_mmu_for_each_tlb_entry(mmu, ent) {
1343                 void *vadr;
1344                 unsigned long size;
1345                 enum exmap_type type;
1346                 int idx;
1347
1348                 /* find a top of link */
1349                 if (!ent->valid || (ent->link.prev >= 0))
1350                         continue;
1351
1352                 vadr = ent->vadr;
1353                 type = ent->type;
1354                 size = 0;
1355                 idx = i;
1356                 do {
1357                         ent = mmu->exmap_tbl + idx;
1358                         size += PAGE_SIZE << ent->order;
1359                 } while ((idx = ent->link.next) >= 0);
1360
1361                 len += sprintf(buf + len, "0x%06lx %#8lx",
1362                                virt_to_omap_mmu(mmu, vadr), size);
1363
1364                 if (type == EXMAP_TYPE_FB) {
1365                         len += sprintf(buf + len, "    framebuf\n");
1366                 } else {
1367                         len += sprintf(buf + len, "\n");
1368                         idx = i;
1369                         do {
1370                                 ent = mmu->exmap_tbl + idx;
1371                                 len += sprintf(buf + len,
1372                                                /* 0xc0171000 0x100000  0*/
1373                                                "%19s0x%8p %#8lx %2d\n",
1374                                                "", ent->buf,
1375                                                PAGE_SIZE << ent->order,
1376                                                ent->usecount);
1377                         } while ((idx = ent->link.next) >= 0);
1378                 }
1379
1380                 i++;
1381         }
1382
1383         up_read(&mmu->exmap_sem);
1384         return len;
1385 }
1386
1387 static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
1388                            const char *buf,
1389                            size_t count)
1390 {
1391         struct omap_mmu *mmu = dev_get_drvdata(dev);
1392         unsigned long base = 0, len = 0;
1393         int ret;
1394
1395         sscanf(buf, "%lx %lx", &base, &len);
1396
1397         if (!base)
1398                 return -EINVAL;
1399
1400         if (len) {
1401                 /* Add the mapping */
1402                 ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
1403                 if (ret < 0)
1404                         return ret;
1405         } else {
1406                 /* Remove the mapping */
1407                 ret = omap_mmu_exunmap(mmu, base);
1408                 if (ret < 0)
1409                         return ret;
1410         }
1411
1412         return count;
1413 }
1414
1415 static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
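/*
 * Illustrative use of the exmap attribute from user space -- a sketch,
 * not part of this file; the exact sysfs path and the device name "dsp"
 * are assumptions.  exmap_store() above parses "<dspadr> <size>" in hex,
 * and a zero size removes the mapping:
 *
 *	echo "200000 100000" > /sys/class/mmu/dsp/exmap	   (map 1MB)
 *	echo "200000 0"      > /sys/class/mmu/dsp/exmap	   (unmap)
 */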
1416
1417 static ssize_t mempool_show(struct class *class, char *buf)
1418 {
1419         int min_nr_1M = 0, curr_nr_1M = 0;
1420         int min_nr_64K = 0, curr_nr_64K = 0;
1421         int total = 0;
1422
1423         if (likely(mempool_1M)) {
1424                 min_nr_1M  = mempool_1M->min_nr;
1425                 curr_nr_1M = mempool_1M->curr_nr;
1426                 total += min_nr_1M * SZ_1M;
1427         }
1428         if (likely(mempool_64K)) {
1429                 min_nr_64K  = mempool_64K->min_nr;
1430                 curr_nr_64K = mempool_64K->curr_nr;
1431                 total += min_nr_64K * SZ_64K;
1432         }
1433
1434         return sprintf(buf,
1435                        "0x%x\n"
1436                        "1M  buffer: %d (%d free)\n"
1437                        "64K buffer: %d (%d free)\n",
1438                        total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
1439 }
1440
1441
1442 static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
1443
1444 static void omap_mmu_class_dev_release(struct device *dev)
1445 {
1446 }
1447
1448 static struct class omap_mmu_class = {
1449         .name           = "mmu",
1450         .dev_release    = omap_mmu_class_dev_release,
1451 };
1452
1453 int omap_mmu_register(struct omap_mmu *mmu)
1454 {
1455         int ret;
1456
1457         mmu->dev.class = &omap_mmu_class;
1458         strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
1459         dev_set_drvdata(&mmu->dev, mmu);
1460
1461         mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
1462                                  GFP_KERNEL);
1463         if (!mmu->exmap_tbl)
1464                 return -ENOMEM;
1465
1466         if (mmu->ops->pte_get_attr) {
1467                 struct mm_struct *mm =  mm_alloc();
1468                 if (!mm) {
1469                         ret = -ENOMEM;
1470                         goto err_mm_alloc;
1471                 }
1472                 mmu->twl_mm = mm;
1473         }
1474
1475         ret = device_register(&mmu->dev);
1476         if (unlikely(ret))
1477                 goto err_dev_register;
1478
1479         init_rwsem(&mmu->exmap_sem);
1480
1481         ret = omap_mmu_init(mmu);
1482         if (unlikely(ret))
1483                 goto err_mmu_init;
1484
1485         ret = device_create_file(&mmu->dev, &dev_attr_mmu);
1486         if (unlikely(ret))
1487                 goto err_dev_create_mmu;
1488         ret = device_create_file(&mmu->dev, &dev_attr_exmap);
1489         if (unlikely(ret))
1490                 goto err_dev_create_exmap;
1491
1492         if (likely(mmu->membase)) {
1493                 dev_attr_mem.size = mmu->memsize;
1494                 ret = device_create_bin_file(&mmu->dev,
1495                                              &dev_attr_mem);
1496                 if (unlikely(ret))
1497                         goto err_bin_create_mem;
1498         }
1499
1500         return 0;
1501
1502 err_bin_create_mem:
1503         device_remove_file(&mmu->dev, &dev_attr_exmap);
1504 err_dev_create_exmap:
1505         device_remove_file(&mmu->dev, &dev_attr_mmu);
1506 err_dev_create_mmu:
1507         omap_mmu_shutdown(mmu);
1508 err_mmu_init:
1509         device_unregister(&mmu->dev);
1510 err_dev_register:
1511         kfree(mmu->twl_mm);
1512         mmu->twl_mm = NULL;
1513 err_mm_alloc:
1514         kfree(mmu->exmap_tbl);
1515         mmu->exmap_tbl = NULL;
1516         return ret;
1517 }
1518 EXPORT_SYMBOL_GPL(omap_mmu_register);
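/*
 * Illustrative registration sketch -- hypothetical mach-level code, not
 * part of this file; the field values and the 'dsp_mmu_ops' symbol are
 * assumptions, and fields such as irq, clk, membase and memsize are
 * omitted.  A platform fills in a struct omap_mmu and registers it:
 *
 *	static struct omap_mmu dsp_mmu = {
 *		.name		= "dsp",
 *		.nr_tlb_entries	= 32,
 *		.ops		= &dsp_mmu_ops,
 *	};
 *
 *	ret = omap_mmu_register(&dsp_mmu);
 *
 * omap_mmu_unregister(&dsp_mmu) undoes the above on removal.
 */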
1519
1520 void omap_mmu_unregister(struct omap_mmu *mmu)
1521 {
1522         omap_mmu_shutdown(mmu);
1523         omap_mmu_kmem_release();
1524
1525         device_remove_file(&mmu->dev, &dev_attr_mmu);
1526         device_remove_file(&mmu->dev, &dev_attr_exmap);
1527
1528         if (likely(mmu->membase))
1529                 device_remove_bin_file(&mmu->dev,
1530                                              &dev_attr_mem);
1531
1532         kfree(mmu->exmap_tbl);
1533         mmu->exmap_tbl = NULL;
1534
1535         if (mmu->ops->pte_get_attr) {
1536                 if (mmu->twl_mm) {
1537                         __mmdrop(mmu->twl_mm);
1538                         mmu->twl_mm = NULL;
1539                 }
1540         }
1541
1542         device_unregister(&mmu->dev);
1543 }
1544 EXPORT_SYMBOL_GPL(omap_mmu_unregister);
1545
1546 static int __init omap_mmu_class_init(void)
1547 {
1548         int ret = class_register(&omap_mmu_class);
1549         if (!ret)
1550                 ret = class_create_file(&omap_mmu_class, &class_attr_mempool);
1551
1552         return ret;
1553 }
1554
1555 static void __exit omap_mmu_class_exit(void)
1556 {
1557         class_remove_file(&omap_mmu_class, &class_attr_mempool);
1558         class_unregister(&omap_mmu_class);
1559 }
1560
1561 subsys_initcall(omap_mmu_class_init);
1562 module_exit(omap_mmu_class_exit);
1563
1564 MODULE_LICENSE("GPL");