/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU implements base and victim in different bits of the LOCK register
 * (the shifts are still the same).  All of the other registers are the
 * same on all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT             10
#define MMU_LOCK_VICTIM_SHIFT           4

#define CAMERA_MMU_LOCK_BASE_MASK       (0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK     (0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr,align)   (!((adr)&((align)-1)))
#define ORDER_1MB       (20 - PAGE_SHIFT)
#define ORDER_64KB      (16 - PAGE_SHIFT)
#define ORDER_4KB       (12 - PAGE_SHIFT)

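/*
 * With 4KB pages (PAGE_SHIFT == 12), the orders above evaluate to
 * ORDER_1MB == 8, ORDER_64KB == 4 and ORDER_4KB == 0: the page
 * allocator order needed for one physically contiguous block of
 * each size, since (1 << 8) * 4KB == 1MB.
 */
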
static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)                 \
        for (entry = mmu->exmap_tbl; prefetch(entry + 1),       \
             entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);    \
             entry++)

#define to_dev(obj)     container_of(obj, struct device, kobj)

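/*
 * Unlike plain mempool_alloc(), which tries the backing allocator first
 * and only then falls back on the pool's reserved elements, this helper
 * hands out a pre-reserved element whenever one is available.  That way
 * the contiguous blocks set aside by omap_mmu_kmem_reserve() are
 * preferred even while normal allocations would still succeed.
 */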
static void *mempool_alloc_from_pool(mempool_t *pool,
                                     unsigned int __nocast gfp_mask)
{
        spin_lock_irq(&pool->lock);
        if (likely(pool->curr_nr)) {
                void *element = pool->elements[--pool->curr_nr];
                spin_unlock_irq(&pool->lock);
                return element;
        }

        spin_unlock_irq(&pool->lock);
        return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may request physically contiguous 1MB or 64KB blocks, which
 * are hard to obtain once memory has become fragmented.  Users can
 * therefore reserve such memory blocks in an early phase through
 * kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
        return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
        free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
        unsigned long len = size;

        /* alignment check */
        if (!is_aligned(size, SZ_64K)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
                return -EINVAL;
        }

        if (size > (1 << mmu->addrspace)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is larger than DSP memory space "
                       "size (0x%x).\n", size, (1 << mmu->addrspace));
                return -EINVAL;
        }

        if (size >= SZ_1M) {
                int nr = size >> 20;

                if (likely(!mempool_1M))
                        mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
                                                    omap_mmu_pool_free,
                                                    (void *)ORDER_1MB);
                else
                        mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
                                       GFP_KERNEL);

                size &= ~(0xf << 20);
        }

        if (size >= SZ_64K) {
                int nr = size >> 16;

                if (likely(!mempool_64K))
                        mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
                                                     omap_mmu_pool_free,
                                                     (void *)ORDER_64KB);
                else
                        mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
                                       GFP_KERNEL);

                size &= ~(0xf << 16);
        }

        if (size)
                len -= size;

        return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);

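/*
 * Usage sketch (hypothetical caller, assuming an already registered
 * "struct omap_mmu *mmu"): reserve 2MB + 128KB of contiguous blocks
 * early, and give them back on teardown.  The size must be a multiple
 * of 64KB; the return value is the number of bytes actually reserved,
 * or a negative errno.
 *
 *      reserved = omap_mmu_kmem_reserve(mmu, (2 << 20) + (128 << 10));
 *      if (reserved < 0)
 *              return reserved;
 *      ...
 *      omap_mmu_kmem_release();
 */
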
void omap_mmu_kmem_release(void)
{
        if (mempool_64K) {
                mempool_destroy(mempool_64K);
                mempool_64K = NULL;
        }

        if (mempool_1M) {
                mempool_destroy(mempool_1M);
                mempool_1M = NULL;
        }
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
        struct page *page, *ps, *pe;

        ps = virt_to_page(buf);
        pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

        for (page = ps; page < pe; page++)
                ClearPageReserved(page);

        if ((order == ORDER_64KB) && likely(mempool_64K))
                mempool_free((void *)buf, mempool_64K);
        else if ((order == ORDER_1MB) && likely(mempool_1M))
                mempool_free((void *)buf, mempool_1M);
        else
                free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        printk(KERN_DEBUG
               "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
        }
        if (sz_left)
                BUG();

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;

        printk(KERN_DEBUG
               "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
               virt, size);

        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                pmdp = pmd_offset(pgd_offset_k(virt), virt);
                ptep = pte_offset_kernel(pmdp, virt);
                pte_clear(&init_mm, virt, ptep);
        }
        if (sz_left)
                BUG();
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
        /* exmap_sem should be held before calling this function */
        struct exmap_tbl *ent;

start:
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        if (vadr + len <= mapadr + mapsize) {
                                /* this map covers whole address. */
                                return 1;
                        } else {
                                /*
                                 * this map covers the range only
                                 * partially; check the rest.
                                 */
                                len -= mapadr + mapsize - vadr;
                                vadr = mapadr + mapsize;
                                goto start;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount++;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount--;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns physical address, and sets len to valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
        struct exmap_tbl *ent;

        if (omap_mmu_internal_memory(mmu, vadr)) {
                unsigned long addr = (unsigned long)vadr;
                *len = mmu->membase + mmu->memsize - addr;
                return addr;
        }

        /* EXRAM */
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        *len = mapadr + mapsize - vadr;
                        return __pa(ent->buf) + vadr - mapadr;
                }
        }

        /* valid mapping not found */
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);

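/*
 * Usage sketch (hypothetical caller): a DSP buffer may span several
 * exmap regions, so a physically contiguous run can be shorter than
 * the buffer; translate it piecewise using the returned valid length.
 *
 *      void *vadr = omap_mmu_to_virt(mmu, dspadr);
 *
 *      while (total > 0) {
 *              size_t len;
 *              unsigned long padr = omap_mmu_virt_to_phys(mmu, vadr, &len);
 *
 *              if (!padr)
 *                      break;          (no valid mapping found)
 *              if (len > total)
 *                      len = total;
 *              ...                     (operate on [padr, padr + len))
 *              vadr += len;
 *              total -= len;
 *      }
 */
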
/*
 * MMU operations
 */
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
        return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
                                  struct cam_ram_regset *cr)
{
        return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
        unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
        int mask;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
        tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
        tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
        omap_mmu_write_reg(mmu,
                           (lock->base << MMU_LOCK_BASE_SHIFT) |
                           (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
                       struct cam_ram_regset *cr)
{
        /* set victim */
        omap_mmu_set_tlb_lock(mmu, lock);

        if (likely(mmu->ops->read_tlb))
                mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        if (likely(mmu->ops->load_tlb))
                mmu->ops->load_tlb(mmu, cr);

        /* flush the entry */
        omap_mmu_flush(mmu);

        /* load a TLB entry */
        omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
                            struct omap_mmu_tlb_entry *entry)
{
        struct omap_mmu_tlb_lock lock;
        struct cam_ram_regset *cr;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
                struct cam_ram_regset tmp;

                /* read a TLB entry */
                omap_mmu_read_tlb(mmu, &lock, &tmp);
                if (!omap_mmu_cam_ram_valid(mmu, &tmp))
                        goto found_victim;
        }
        omap_mmu_set_tlb_lock(mmu, &lock);

found_victim:
        /* the last entry cannot be locked */
        if (lock.victim == (mmu->nr_tlb_entries - 1)) {
                printk(KERN_ERR "MMU: TLB is full.\n");
                omap_dsp_release_mem();
                clk_disable(mmu->clk);
                return -EBUSY;
        }

        cr = omap_mmu_cam_ram_alloc(mmu, entry);
        if (IS_ERR(cr)) {
                omap_dsp_release_mem();
                clk_disable(mmu->clk);
                return PTR_ERR(cr);
        }

        omap_mmu_load_tlb(mmu, cr);
        kfree(cr);

        /* update lock base */
        if (lock.victim == lock.base)
                lock.base++;

        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

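/*
 * Sketch of loading a single locked-down TLB entry by hand (hypothetical
 * caller; INIT_TLB_ENTRY() is the same helper that omap_mmu_exmap()
 * uses below):
 *
 *      struct omap_mmu_tlb_entry ent;
 *      int err;
 *
 *      INIT_TLB_ENTRY(&ent, dspadr, padr, OMAP_MMU_CAM_PAGESIZE_4KB);
 *      err = omap_mmu_load_tlb_entry(mmu, &ent);
 */
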
static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
        struct omap_mmu_tlb_lock lock;
        int i;
        int max_valid = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (i = 0; i < lock.base; i++) {
                struct cam_ram_regset cr;

                /* read a TLB entry */
                lock.victim = i;
                omap_mmu_read_tlb(mmu, &lock, &cr);
                if (!omap_mmu_cam_ram_valid(mmu, &cr))
                        continue;

                if (omap_mmu_cam_va(mmu, &cr) == vadr)
                        /* flush the entry */
                        omap_mmu_flush(mmu);
                else
                        max_valid = i;
        }

        /* set new lock base */
        lock.base = lock.victim = max_valid + 1;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock lock;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
        lock.base = lock.victim = mmu->nr_exmap_preserved;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
}

/*
 * omap_mmu_exmap()
 *
 * The MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.
 * In that case, the buffer for the DSP is allocated in this routine
 * and then mapped.
 * Other callers, for example frame buffer sharing, call this function
 * with padr set.  That means a known address space pointed to by padr
 * is going to be shared with the DSP.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
                   unsigned long padr, unsigned long size,
                   enum exmap_type type)
{
        unsigned long pgsz;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;
        int prev = -1;
        unsigned long _dspadr = dspadr;
        unsigned long _padr = padr;
        void *_vadr = omap_mmu_to_virt(mmu, dspadr);
        unsigned long _size = size;
        struct omap_mmu_tlb_entry tlb_ent;
        struct exmap_tbl *exmap_ent, *tmp_ent;
        int status;
        int idx;

#define MINIMUM_PAGESZ  SZ_4K
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
                return -EINVAL;
        }
        if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: physical address(0x%lx) is not aligned.\n",
                       padr);
                return -EINVAL;
        }

        /* address validity check */
        if ((dspadr < mmu->memsize) ||
            (dspadr >= (1 << mmu->addrspace))) {
                printk(KERN_ERR
                       "MMU: illegal address/size for %s().\n",
                       __FUNCTION__);
                return -EINVAL;
        }

        down_write(&mmu->exmap_sem);

        /* overlap check */
        omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
                unsigned long mapsize;

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        printk(KERN_ERR "MMU: exmap page overlap!\n");
                        up_write(&mmu->exmap_sem);
                        return -EINVAL;
                }
        }

start:
        buf = NULL;
        /* Are there any free TLB lines? */
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
                if (!mmu->exmap_tbl[idx].valid)
                        goto found_free;

        printk(KERN_ERR "MMU: DSP TLB is full.\n");
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = mmu->exmap_tbl + idx;

        if ((_size >= SZ_1M) &&
            (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
            is_aligned(_dspadr, SZ_1M)) {
                unit = SZ_1M;
                pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
        } else if ((_size >= SZ_64K) &&
                   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64K)) {
                unit = SZ_64K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
        } else {
                unit = SZ_4K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
        }

        order = get_order(unit);

        /* buffer allocation */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                if ((order == ORDER_1MB) && likely(mempool_1M))
                        buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
                else if ((order == ORDER_64KB) && likely(mempool_64K))
                        buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
                else {
                        buf = (void *)__get_dma_pages(GFP_KERNEL, order);
                        if (buf == NULL) {
                                status = -ENOMEM;
                                goto fail;
                        }
                }

                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);

                for (page = ps; page < pe; page++)
                        SetPageReserved(page);

                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we should not access the allocated memory through 'buf'
         * since this area must not be cached.
         */
        status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading DSP TLB entry */
        INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
        status = omap_mmu_load_tlb_entry(mmu, &tlb_ent);
        if (status < 0) {
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
        }

        INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
        exmap_ent->link.prev = prev;
        if (prev >= 0)
                mmu->exmap_tbl[prev].link.next = idx;

        if ((_size -= unit) == 0) {     /* normal completion */
                up_write(&mmu->exmap_sem);
                return size;
        }

        _dspadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        prev = idx;
        goto start;

fail:
        up_write(&mmu->exmap_sem);
        if (buf)
                omap_mmu_free_pages((unsigned long)buf, order);
        omap_mmu_exunmap(mmu, dspadr);
        return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);

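/*
 * Usage sketch (hypothetical caller): allocate and map 1MB of kernel
 * memory at DSP address 0x300000, then tear the mapping down again.
 * On success omap_mmu_exmap() returns the requested size.
 *
 *      ret = omap_mmu_exmap(mmu, 0x300000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *      if (ret < 0)
 *              return ret;
 *      ...
 *      omap_mmu_exunmap(mmu, 0x300000);
 *
 * Passing a non-zero padr instead maps that existing physical area
 * (frame buffer style sharing) without allocating a new buffer.
 */
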
static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
        unsigned long size;

        /* clearing ARM MMU */
        size = 1 << (ent->order + PAGE_SHIFT);
        exmap_clear_armmmu((unsigned long)ent->vadr, size);

        /* freeing allocated memory */
        if (ent->type == EXMAP_TYPE_MEM) {
                omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
                printk(KERN_DEBUG
                       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
                       size, ent->buf);
        }

        ent->valid = 0;
        return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
        void *vadr;
        unsigned long size;
        int total = 0;
        struct exmap_tbl *ent;
        int idx;

        vadr = omap_mmu_to_virt(mmu, dspadr);
        down_write(&mmu->exmap_sem);
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
                ent = mmu->exmap_tbl + idx;
                if (!ent->valid || ent->prsvd)
                        continue;
                if (ent->vadr == vadr)
                        goto found_map;
        }
        up_write(&mmu->exmap_sem);
        printk(KERN_WARNING
               "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
        return -EINVAL;

found_map:
        if (ent->usecount > 0) {
                printk(KERN_ERR
                       "MMU: exmap reference count is not 0.\n"
                       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
                       idx, ent->vadr, ent->order, ent->usecount);
                up_write(&mmu->exmap_sem);
                return -EINVAL;
        }
        /* clearing DSP TLB entry */
        omap_mmu_clear_tlb_entry(mmu, dspadr);

        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(ent);
        total += size;

        /* we don't free PTEs */

        /* flush TLB */
        flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

        /* check if next mapping is in same group */
        idx = ent->link.next;
        if (idx < 0)
                goto up_out;    /* normal completion */
        ent = mmu->exmap_tbl + idx;
        dspadr += size;
        vadr   += size;
        if (ent->vadr == vadr)
                goto found_map; /* continue */

        printk(KERN_ERR
               "MMU: illegal exmap_tbl grouping!\n"
               "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
               vadr, idx, ent->vadr);
        up_write(&mmu->exmap_sem);
        return -EINVAL;

up_out:
        up_write(&mmu->exmap_sem);
        return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);

        /* clearing TLB entry */
        omap_mmu_gflush(mmu);

        omap_mmu_for_each_tlb_entry(mmu, ent)
                if (ent->valid && !ent->prsvd)
                        unmap_free_arm(ent);

        /* flush TLB */
        if (likely(mmu->membase))
                flush_tlb_kernel_range(mmu->membase + mmu->memsize,
                                       mmu->membase + (1 << mmu->addrspace));

        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
                                    unsigned long dspadr, int index)
{
        unsigned long phys;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        phys = __pa(buf);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
        INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
        omap_mmu_load_tlb_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
        void *virt = omap_mmu_to_virt(mmu, dspadr);

        exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
        /* the DSP MMU is shutting down; the TLB entry is not handled here */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
        int i;

        omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

        for (i = 0; i < 10000; i++)
                if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
                        break;
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
        if (likely(reset))
                omap_mmu_reset(mmu);

        omap_mmu_write_reg(mmu, 0x2, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
        struct omap_mmu *mmu = dev_id;

        if (likely(mmu->ops->interrupt))
                mmu->ops->interrupt(mmu);

        return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();
        down_write(&mmu->exmap_sem);

        ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
                          mmu->name, mmu);
        if (ret < 0) {
                printk(KERN_ERR
                       "failed to register MMU interrupt: %d\n", ret);
                goto fail;
        }

        omap_mmu_disable(mmu);  /* clear all */
        udelay(100);
        omap_mmu_enable(mmu, 1);

        memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        if (unlikely(mmu->ops->startup))
                ret = mmu->ops->startup(mmu);
 fail:
        up_write(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

918
919 static void omap_mmu_shutdown(struct omap_mmu *mmu)
920 {
921         free_irq(mmu->irq, mmu);
922
923         if (unlikely(mmu->ops->shutdown))
924                 mmu->ops->shutdown(mmu);
925
926         omap_mmu_exmap_flush(mmu);
927         omap_mmu_disable(mmu); /* clear all */
928 }
929
930 /*
931  * omap_mmu_mem_enable() / disable()
932  */
933 int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
934 {
935         if (unlikely(mmu->ops->mem_enable))
936                 return mmu->ops->mem_enable(mmu, addr);
937
938         down_read(&mmu->exmap_sem);
939         return 0;
940 }
941 EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);
942
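/*
 * omap_mmu_mem_enable()/omap_mmu_mem_disable() are meant to bracket
 * accesses through omap_mmu_to_virt() addresses, as omap_mmu_mem_read()
 * and omap_mmu_mem_write() below do.  In the default case (no mem_enable
 * op) the pair simply holds exmap_sem for reading, so mappings cannot be
 * torn down in the middle of an access.
 */
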
void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_disable)) {
                mmu->ops->mem_disable(mmu, addr);
                return;
        }

        up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t read;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        read = count;
        if (count > size - p)
                read = size - p;
        if (copy_to_user(buf, vadr, read)) {
                read = -EFAULT;
                goto out;
        }
        *ppos += read;
out:
        clk_disable(mmu->memclk);
        return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                          loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                printk(KERN_ERR
                       "MMU: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_to_user(buf, vadr, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
                                 loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_read(mmu, buf, count, &offset);
        else
                ret = exmem_read(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
                            loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t written;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        written = count;
        if (count > size - p)
                written = size - p;
        if (copy_from_user(vadr, buf, written)) {
                written = -EFAULT;
                goto out;
        }
        *ppos += written;
out:
        clk_disable(mmu->memclk);
        return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                printk(KERN_ERR
                       "MMU: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_from_user(vadr, buf, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
                                  loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_write(mmu, buf, count, &offset);
        else
                ret = exmem_write(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static struct bin_attribute dev_attr_mem = {
        .attr   = {
                .name   = "mem",
                .owner  = THIS_MODULE,
                .mode   = S_IRUSR | S_IWUSR | S_IRGRP,
        },

        .read   = omap_mmu_mem_read,
        .write  = omap_mmu_mem_write,
};

/* Kept only for backward compatibility; these wrappers will become obsolete. */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
                            loff_t offset, size_t count)
{
        return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
                             loff_t offset, size_t count)
{
        return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = -EIO;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        down_read(&mmu->exmap_sem);

        omap_mmu_get_tlb_lock(mmu, &tlb_lock);

        if (likely(mmu->ops->show))
                ret = mmu->ops->show(mmu, buf, &tlb_lock);

        /* restore victim entry */
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        up_read(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct exmap_tbl *ent;
        int len;
        int i = 0;

        down_read(&mmu->exmap_sem);
        len = sprintf(buf, "  dspadr     size         buf     size uc\n");
                         /* 0x300000 0x123000  0xc0171000 0x100000  0*/

        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *vadr;
                unsigned long size;
                enum exmap_type type;
                int idx;

                /* find the head of a link chain */
                if (!ent->valid || (ent->link.prev >= 0))
                        continue;

                vadr = ent->vadr;
                type = ent->type;
                size = 0;
                idx = i;
                do {
                        ent = mmu->exmap_tbl + idx;
                        size += PAGE_SIZE << ent->order;
                } while ((idx = ent->link.next) >= 0);

                len += sprintf(buf + len, "0x%06lx %#8lx",
                               virt_to_omap_mmu(mmu, vadr), size);

                if (type == EXMAP_TYPE_FB) {
                        len += sprintf(buf + len, "    framebuf\n");
                } else {
                        len += sprintf(buf + len, "\n");
                        idx = i;
                        do {
                                ent = mmu->exmap_tbl + idx;
                                len += sprintf(buf + len,
                                               /* 0xc0171000 0x100000  0*/
                                               "%19s0x%8p %#8lx %2d\n",
                                               "", ent->buf,
                                               PAGE_SIZE << ent->order,
                                               ent->usecount);
                        } while ((idx = ent->link.next) >= 0);
                }

                i++;
        }

        up_read(&mmu->exmap_sem);
        return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
                           const char *buf,
                           size_t count)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long base = 0, len = 0;
        int ret;

        sscanf(buf, "%lx %lx", &base, &len);

        if (!base)
                return -EINVAL;

        if (len) {
                /* Add the mapping */
                ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
                if (ret < 0)
                        return ret;
        } else {
                /* Remove the mapping */
                ret = omap_mmu_exunmap(mmu, base);
                if (ret < 0)
                        return ret;
        }

        return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);

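/*
 * The exmap attribute can be driven from user space; a sketch, with a
 * hypothetical device name "dsp":
 *
 *      # map 1MB of kernel memory at DSP address 0x300000
 *      echo "300000 100000" > /sys/class/mmu/dsp/exmap
 *      # a zero (or omitted) length unmaps it again
 *      echo "300000" > /sys/class/mmu/dsp/exmap
 */
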
static ssize_t mempool_show(struct class *class, char *buf)
{
        int min_nr_1M = 0, curr_nr_1M = 0;
        int min_nr_64K = 0, curr_nr_64K = 0;
        int total = 0;

        if (likely(mempool_1M)) {
                min_nr_1M  = mempool_1M->min_nr;
                curr_nr_1M = mempool_1M->curr_nr;
                total += min_nr_1M * SZ_1M;
        }
        if (likely(mempool_64K)) {
                min_nr_64K  = mempool_64K->min_nr;
                curr_nr_64K = mempool_64K->curr_nr;
                total += min_nr_64K * SZ_64K;
        }

        return sprintf(buf,
                       "0x%x\n"
                       "1M  buffer: %d (%d free)\n"
                       "64K buffer: %d (%d free)\n",
                       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
        .name           = "mmu",
        .dev_release    = omap_mmu_class_dev_release,
};

int omap_mmu_register(struct omap_mmu *mmu)
{
        int ret;

        mmu->dev.class = &omap_mmu_class;
        strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
        dev_set_drvdata(&mmu->dev, mmu);

        mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
                                 GFP_KERNEL);
        if (!mmu->exmap_tbl)
                return -ENOMEM;

        ret = device_register(&mmu->dev);
        if (unlikely(ret))
                goto err_dev_register;

        init_rwsem(&mmu->exmap_sem);

        ret = omap_mmu_read_reg(mmu, MMU_REVISION);
        printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
               mmu->name, (ret >> 4) & 0xf, ret & 0xf);

        ret = omap_mmu_init(mmu);
        if (unlikely(ret))
                goto err_mmu_init;

        ret = device_create_file(&mmu->dev, &dev_attr_mmu);
        if (unlikely(ret))
                goto err_dev_create_mmu;
        ret = device_create_file(&mmu->dev, &dev_attr_exmap);
        if (unlikely(ret))
                goto err_dev_create_exmap;

        if (likely(mmu->membase)) {
                dev_attr_mem.size = mmu->memsize;
                ret = device_create_bin_file(&mmu->dev, &dev_attr_mem);
                if (unlikely(ret))
                        goto err_bin_create_mem;
        }

        return 0;

err_bin_create_mem:
        device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
        device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
        omap_mmu_shutdown(mmu);
err_mmu_init:
        device_unregister(&mmu->dev);
err_dev_register:
        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);

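/*
 * Registration sketch (hypothetical machine-specific code; only fields
 * that this file itself dereferences are shown, everything else is
 * elided):
 *
 *      static struct omap_mmu dsp_mmu = {
 *              .name           = "dsp",
 *              .nr_tlb_entries = 32,
 *              .addrspace      = 24,
 *              .ops            = &dsp_mmu_ops,
 *              ...
 *      };
 *
 *      err = omap_mmu_register(&dsp_mmu);
 *      ...
 *      omap_mmu_unregister(&dsp_mmu);
 */
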
void omap_mmu_unregister(struct omap_mmu *mmu)
{
        omap_mmu_shutdown(mmu);
        omap_mmu_kmem_release();

        device_remove_file(&mmu->dev, &dev_attr_mmu);
        device_remove_file(&mmu->dev, &dev_attr_exmap);

        if (likely(mmu->membase))
                device_remove_bin_file(&mmu->dev, &dev_attr_mem);

        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;

        device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
        int ret = class_register(&omap_mmu_class);
        if (!ret)
                ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

        return ret;
}

static void __exit omap_mmu_class_exit(void)
{
        class_remove_file(&omap_mmu_class, &class_attr_mempool);
        class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");