/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU implements base and victim in different bits of the LOCK register
 * (the shifts are still the same). All of the other registers are the
 * same on all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT             10
#define MMU_LOCK_VICTIM_SHIFT           4

#define CAMERA_MMU_LOCK_BASE_MASK       (0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK     (0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr,align)   (!((adr)&((align)-1)))
#define ORDER_1MB       (20 - PAGE_SHIFT)
#define ORDER_64KB      (16 - PAGE_SHIFT)
#define ORDER_4KB       (12 - PAGE_SHIFT)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

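/*
 * Iterate over all entries of the exmap table, prefetching the next
 * entry while the current one is being processed.
 */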
#define omap_mmu_for_each_tlb_entry(mmu, entry)                 \
        for (entry = mmu->exmap_tbl; prefetch(entry + 1),       \
             entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);    \
             entry++)

#define to_dev(obj)     container_of(obj, struct device, kobj)

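/*
 * Take a preallocated element straight from the pool when one is
 * available; only fall back to mempool_alloc() (which may allocate
 * fresh pages) when the pool has run dry.
 */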
static void *mempool_alloc_from_pool(mempool_t *pool,
                                     unsigned int __nocast gfp_mask)
{
        spin_lock_irq(&pool->lock);
        if (likely(pool->curr_nr)) {
                void *element = pool->elements[--pool->curr_nr];
                spin_unlock_irq(&pool->lock);
                return element;
        }

        spin_unlock_irq(&pool->lock);
        return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may request physically contiguous 1MB or 64kB blocks, which
 * are hard to obtain once memory has become fragmented. Users can
 * therefore reserve such memory blocks early on through kmem_reserve().
 */
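/*
 * A minimal usage sketch (the mmu pointer and the 2MB figure are
 * assumptions for illustration, not taken from this file): a DSP driver
 * could reserve contiguous memory at init time, before fragmentation
 * sets in:
 *
 *      if (omap_mmu_kmem_reserve(mmu, SZ_1M * 2) < 0)
 *              printk(KERN_WARNING "MMU: exmap reservation failed\n");
 */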
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
        return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
        free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
        unsigned long len = size;

        /* alignment check */
        if (!is_aligned(size, SZ_64K)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not a multiple of 64KB.\n",
                       size);
                return -EINVAL;
        }

        if (size > (1 << mmu->addrspace)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is larger than DSP memory space "
                       "size (0x%x).\n", size, (1 << mmu->addrspace));
                return -EINVAL;
        }

        if (size >= SZ_1M) {
                int nr = size >> 20;

                if (likely(!mempool_1M))
                        mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
                                                    omap_mmu_pool_free,
                                                    (void *)ORDER_1MB);
                else
                        mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
                                       GFP_KERNEL);

                /* keep only the remainder below 1MB */
                size &= (SZ_1M - 1);
        }

        if (size >= SZ_64K) {
                int nr = size >> 16;

                if (likely(!mempool_64K))
                        mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
                                                     omap_mmu_pool_free,
                                                     (void *)ORDER_64KB);
                else
                        mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
                                       GFP_KERNEL);

                /* keep only the remainder below 64KB */
                size &= (SZ_64K - 1);
        }

        if (size)
                len -= size;

        return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);

void omap_mmu_kmem_release(void)
{
        if (mempool_64K) {
                mempool_destroy(mempool_64K);
                mempool_64K = NULL;
        }

        if (mempool_1M) {
                mempool_destroy(mempool_1M);
                mempool_1M = NULL;
        }
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
        struct page *page, *ps, *pe;

        ps = virt_to_page(buf);
        pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

        for (page = ps; page < pe; page++)
                ClearPageReserved(page);

        if ((order == ORDER_64KB) && likely(mempool_64K))
                mempool_free((void *)buf, mempool_64K);
        else if ((order == ORDER_1MB) && likely(mempool_1M))
                mempool_free((void *)buf, mempool_1M);
        else
                free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
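/*
 * exmap_set_armmmu() maps a physically contiguous region into the
 * kernel's virtual address space one page at a time, allocating a PTE
 * table first when the PMD entry is still empty. The bare PTE
 * protection bits leave the mapping non-cacheable, since the same
 * memory is accessed by the DSP.
 */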
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        printk(KERN_DEBUG
               "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
        }
        BUG_ON(sz_left);

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;

        printk(KERN_DEBUG
               "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
               virt, size);

        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                pmdp = pmd_offset(pgd_offset_k(virt), virt);
                ptep = pte_offset_kernel(pmdp, virt);
                pte_clear(&init_mm, virt, ptep);
        }
        BUG_ON(sz_left);
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

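/*
 * exmap_valid() checks that the whole range [vadr, vadr + len) is
 * covered by valid exmap entries. When an entry covers only the head of
 * the range, the check restarts from the first uncovered byte, so a
 * range spanning several entries is validated piecewise.
 */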
int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
        /* exmap_sem should be held before calling this function */
        struct exmap_tbl *ent;

start:
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        if (vadr + len <= mapadr + mapsize) {
                                /* this map covers the whole range. */
                                return 1;
                        } else {
                                /*
                                 * this map covers the range partially;
                                 * check the rest.
                                 */
                                len -= mapadr + mapsize - vadr;
                                vadr = mapadr + mapsize;
                                goto start;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * when a mapped area is exported to user space with mmap, its usecount
 * is incremented. While the usecount is > 0, that area can't be
 * released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount++;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount--;
        }
        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns the physical address and sets *len to the length that is
 * valid from that address onwards.
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
        struct exmap_tbl *ent;

        if (omap_mmu_internal_memory(mmu, vadr)) {
                unsigned long addr = (unsigned long)vadr;
                *len = mmu->membase + mmu->memsize - addr;
                return addr;
        }

        /* EXRAM */
        omap_mmu_for_each_tlb_entry(mmu, ent) {
                void *mapadr;
                unsigned long mapsize;

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        *len = mapadr + mapsize - vadr;
                        return __pa(ent->buf) + vadr - mapadr;
                }
        }

        /* valid mapping not found */
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);

/*
 * MMU operations
 */
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
        return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
                                  struct cam_ram_regset *cr)
{
        return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
        unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
        int mask;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
        tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

        mask = (mmu->type == OMAP_MMU_CAMERA) ?
                        CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
        tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
        omap_mmu_write_reg(mmu,
                           (lock->base << MMU_LOCK_BASE_SHIFT) |
                           (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}

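/*
 * Flush the TLB entry matching the current CAM register contents, or
 * load the CAM/RAM register contents into the TLB entry selected by the
 * victim pointer. (This description of the register semantics is an
 * assumption inferred from how the helpers are used below.)
 */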
static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
                       struct cam_ram_regset *cr)
{
        /* set victim */
        omap_mmu_set_tlb_lock(mmu, lock);

        if (likely(mmu->ops->read_tlb))
                mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        if (likely(mmu->ops->load_tlb))
                mmu->ops->load_tlb(mmu, cr);

        /* flush the entry */
        omap_mmu_flush(mmu);

        /* load a TLB entry */
        omap_mmu_ldtlb(mmu);
}

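/*
 * Load a new, locked TLB entry: reuse an invalid slot inside the locked
 * region [0, base) when one exists, otherwise take the entry at the
 * lock base and extend the locked region by one.
 */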
int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
                            struct omap_mmu_tlb_entry *entry)
{
        struct omap_mmu_tlb_lock lock;
        struct cam_ram_regset *cr;
        int status;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
                struct cam_ram_regset tmp;

                /* read a TLB entry */
                omap_mmu_read_tlb(mmu, &lock, &tmp);
                if (!omap_mmu_cam_ram_valid(mmu, &tmp))
                        goto found_victim;
        }
        omap_mmu_set_tlb_lock(mmu, &lock);

found_victim:
        /* the last entry cannot be locked */
        if (lock.victim == (mmu->nr_tlb_entries - 1)) {
                printk(KERN_ERR "MMU: TLB is full.\n");
                status = -EBUSY;
                goto out;
        }

        cr = omap_mmu_cam_ram_alloc(mmu, entry);
        if (IS_ERR(cr)) {
                status = PTR_ERR(cr);
                goto out;
        }

        omap_mmu_load_tlb(mmu, cr);
        kfree(cr);

        /* update lock base */
        if (lock.victim == lock.base)
                lock.base++;

        omap_mmu_set_tlb_lock(mmu, &lock);
        status = 0;

out:
        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        return mmu->ops->cam_va(cr);
}

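/*
 * Walk the locked TLB entries, flush the one matching vadr, and shrink
 * the lock base down to just above the last entry that remains valid.
 */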
int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
        struct omap_mmu_tlb_lock lock;
        int i;
        int max_valid = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_get_tlb_lock(mmu, &lock);
        for (i = 0; i < lock.base; i++) {
                struct cam_ram_regset cr;

                /* read a TLB entry */
                lock.victim = i;
                omap_mmu_read_tlb(mmu, &lock, &cr);
                if (!omap_mmu_cam_ram_valid(mmu, &cr))
                        continue;

                if (omap_mmu_cam_va(mmu, &cr) == vadr)
                        /* flush the entry */
                        omap_mmu_flush(mmu);
                else
                        max_valid = i;
        }

        /* set new lock base */
        lock.base = lock.victim = max_valid + 1;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock lock;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
        lock.base = lock.victim = mmu->nr_exmap_preserved;
        omap_mmu_set_tlb_lock(mmu, &lock);

        omap_dsp_release_mem();
        clk_disable(mmu->clk);
}

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.
 * In that case, the buffer for the DSP is allocated in this routine,
 * then mapped.
 * On the other hand - for frame buffer sharing, for example - this
 * function is called with padr set, meaning that a known physical
 * address space pointed to by padr is going to be shared with the DSP.
 */
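/*
 * The mapping loop below greedily picks the largest page size (1MB,
 * 64KB, then 4KB) that the remaining size and the alignment of both
 * addresses allow. For example, a 0x140000-byte (1.25MB) request at a
 * 1MB-aligned DSP address is split into one 1MB page followed by four
 * 64KB pages, all linked together in the exmap table.
 */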
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
                   unsigned long padr, unsigned long size,
                   enum exmap_type type)
{
        unsigned long pgsz;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;
        int prev = -1;
        unsigned long _dspadr = dspadr;
        unsigned long _padr = padr;
        void *_vadr = omap_mmu_to_virt(mmu, dspadr);
        unsigned long _size = size;
        struct omap_mmu_tlb_entry tlb_ent;
        struct exmap_tbl *exmap_ent, *tmp_ent;
        int status;
        int idx;

#define MINIMUM_PAGESZ  SZ_4K
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: size(0x%lx) is not a multiple of 4KB.\n", size);
                return -EINVAL;
        }
        if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "MMU: physical address(0x%lx) is not aligned.\n",
                       padr);
                return -EINVAL;
        }

        /* address validity check */
        if ((dspadr < mmu->memsize) ||
            (dspadr >= (1 << mmu->addrspace))) {
                printk(KERN_ERR
                       "MMU: illegal address/size for %s().\n",
                       __FUNCTION__);
                return -EINVAL;
        }

        down_write(&mmu->exmap_sem);

        /* overlap check */
        omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
                unsigned long mapsize;

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        printk(KERN_ERR "MMU: exmap page overlap!\n");
                        up_write(&mmu->exmap_sem);
                        return -EINVAL;
                }
        }

start:
        buf = NULL;
        /* Are there any free TLB lines? */
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
                if (!mmu->exmap_tbl[idx].valid)
                        goto found_free;

        printk(KERN_ERR "MMU: DSP TLB is full.\n");
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = mmu->exmap_tbl + idx;

        if ((_size >= SZ_1M) &&
            (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
            is_aligned(_dspadr, SZ_1M)) {
                unit = SZ_1M;
                pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
        } else if ((_size >= SZ_64K) &&
                   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64K)) {
                unit = SZ_64K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
        } else {
                unit = SZ_4K;
                pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
        }

        order = get_order(unit);

        /* buffer allocation */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                if ((order == ORDER_1MB) && likely(mempool_1M))
                        buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
                else if ((order == ORDER_64KB) && likely(mempool_64K))
                        buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
                else {
                        buf = (void *)__get_dma_pages(GFP_KERNEL, order);
                        if (buf == NULL) {
                                status = -ENOMEM;
                                goto fail;
                        }
                }

                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);

                for (page = ps; page < pe; page++)
                        SetPageReserved(page);

                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we should not access the allocated memory through 'buf',
         * since this area must not be cached.
         */
        status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading DSP TLB entry */
        INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
        status = omap_mmu_load_tlb_entry(mmu, &tlb_ent);
        if (status < 0) {
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
        }

        INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
        exmap_ent->link.prev = prev;
        if (prev >= 0)
                mmu->exmap_tbl[prev].link.next = idx;

        if ((_size -= unit) == 0) {     /* normal completion */
                up_write(&mmu->exmap_sem);
                return size;
        }

        _dspadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        prev = idx;
        goto start;

fail:
        up_write(&mmu->exmap_sem);
        if (buf)
                omap_mmu_free_pages((unsigned long)buf, order);
        omap_mmu_exunmap(mmu, dspadr);
        return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);

static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
        unsigned long size;

        /* clearing ARM MMU */
        size = 1 << (ent->order + PAGE_SHIFT);
        exmap_clear_armmmu((unsigned long)ent->vadr, size);

        /* freeing allocated memory */
        if (ent->type == EXMAP_TYPE_MEM) {
                omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
                printk(KERN_DEBUG
                       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
                       size, ent->buf);
        }

        ent->valid = 0;
        return size;
}

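/*
 * Unmap the exmap group starting at dspadr: clear the DSP TLB entry,
 * tear down the ARM mapping, free the buffer, and then follow the link
 * list to the remaining members created by the same exmap() call.
 */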
int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
        void *vadr;
        unsigned long size;
        int total = 0;
        struct exmap_tbl *ent;
        int idx;

        vadr = omap_mmu_to_virt(mmu, dspadr);
        down_write(&mmu->exmap_sem);
        for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
                ent = mmu->exmap_tbl + idx;
                if (!ent->valid || ent->prsvd)
                        continue;
                if (ent->vadr == vadr)
                        goto found_map;
        }
        up_write(&mmu->exmap_sem);
        printk(KERN_WARNING
               "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
        return -EINVAL;

found_map:
        if (ent->usecount > 0) {
                printk(KERN_ERR
                       "MMU: exmap reference count is not 0.\n"
                       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
                       idx, ent->vadr, ent->order, ent->usecount);
                up_write(&mmu->exmap_sem);
                return -EINVAL;
        }
        /* clearing DSP TLB entry */
        omap_mmu_clear_tlb_entry(mmu, dspadr);

        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(ent);
        total += size;

        /* we don't free PTEs */

        /* flush TLB */
        flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

        /* check if next mapping is in same group */
        idx = ent->link.next;
        if (idx < 0)
                goto up_out;    /* normal completion */
        ent = mmu->exmap_tbl + idx;
        dspadr += size;
        vadr   += size;
        if (ent->vadr == vadr)
                goto found_map; /* continue */

        printk(KERN_ERR
               "MMU: illegal exmap_tbl grouping!\n"
               "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
               vadr, idx, ent->vadr);
        up_write(&mmu->exmap_sem);
        return -EINVAL;

up_out:
        up_write(&mmu->exmap_sem);
        return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
        struct exmap_tbl *ent;

        down_write(&mmu->exmap_sem);

        /* clearing TLB entry */
        omap_mmu_gflush(mmu);

        omap_mmu_for_each_tlb_entry(mmu, ent)
                if (ent->valid && !ent->prsvd)
                        unmap_free_arm(ent);

        /* flush TLB */
        if (likely(mmu->membase))
                flush_tlb_kernel_range(mmu->membase + mmu->memsize,
                                       mmu->membase + (1 << mmu->addrspace));

        up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
                                    unsigned long dspadr, int index)
{
        unsigned long phys;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        phys = __pa(buf);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
        INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
        omap_mmu_load_tlb_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
        void *virt = omap_mmu_to_virt(mmu, dspadr);

        exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
        /* the DSP MMU itself is shutting down; its TLB is not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

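/*
 * Soft-reset the MMU and poll SYSSTATUS until the reset-done bit
 * appears. (Interpreting the 0x2 written to SYSCONFIG as the SOFTRESET
 * bit and SYSSTATUS bit 0 as RESETDONE is an assumption based on the
 * usual OMAP register convention.)
 */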
static void omap_mmu_reset(struct omap_mmu *mmu)
{
        int i;

        omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

        for (i = 0; i < 10000; i++)
                if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
                        break;
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
        omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
        if (likely(reset))
                omap_mmu_reset(mmu);

        omap_mmu_write_reg(mmu, 0x2, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static int omap_mmu_init(struct omap_mmu *mmu)
{
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = 0;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();
        down_write(&mmu->exmap_sem);

        omap_mmu_disable(mmu);  /* clear all */
        udelay(100);
        omap_mmu_enable(mmu, 1);

        memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        if (unlikely(mmu->ops->startup))
                ret = mmu->ops->startup(mmu);

        up_write(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
        if (unlikely(mmu->ops->shutdown))
                mmu->ops->shutdown(mmu);

        omap_mmu_exmap_flush(mmu);
        omap_mmu_disable(mmu); /* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_enable))
                return mmu->ops->mem_enable(mmu, addr);

        down_read(&mmu->exmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
        if (unlikely(mmu->ops->mem_disable)) {
                mmu->ops->mem_disable(mmu, addr);
                return;
        }

        up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t read;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        read = count;
        if (count > size - p)
                read = size - p;
        if (copy_to_user(buf, vadr, read)) {
                read = -EFAULT;
                goto out;
        }
        *ppos += read;
out:
        clk_disable(mmu->memclk);
        return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
                          loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                printk(KERN_ERR
                       "MMU: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_to_user(buf, vadr, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
                                 loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_read(mmu, buf, count, &offset);
        else
                ret = exmem_read(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
                            loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);
        ssize_t size = mmu->memsize;
        ssize_t written;

        if (p >= size)
                return 0;
        clk_enable(mmu->memclk);
        written = count;
        if (count > size - p)
                written = size - p;
        if (copy_from_user(vadr, buf, written)) {
                written = -EFAULT;
                goto out;
        }
        *ppos += written;
out:
        clk_disable(mmu->memclk);
        return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = omap_mmu_to_virt(mmu, p);

        if (!exmap_valid(mmu, vadr, count)) {
                printk(KERN_ERR
                       "MMU: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > (1 << mmu->addrspace) - p)
                count = (1 << mmu->addrspace) - p;
        if (copy_from_user(vadr, buf, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
                                  loff_t offset, size_t count)
{
        struct device *dev = to_dev(kobj);
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long p = (unsigned long)offset;
        void *vadr = omap_mmu_to_virt(mmu, p);
        int ret;

        if (omap_mmu_mem_enable(mmu, vadr) < 0)
                return -EBUSY;

        if (p < mmu->memsize)
                ret = intmem_write(mmu, buf, count, &offset);
        else
                ret = exmem_write(mmu, buf, count, &offset);

        omap_mmu_mem_disable(mmu, vadr);

        return ret;
}

static struct bin_attribute dev_attr_mem = {
        .attr   = {
                .name   = "mem",
                .owner  = THIS_MODULE,
                .mode   = S_IRUSR | S_IWUSR | S_IRGRP,
        },

        .read   = omap_mmu_mem_read,
        .write  = omap_mmu_mem_write,
};

/* Kept only for backward compatibility; these wrappers are to be removed. */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
                            loff_t offset, size_t count)
{
        return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
                             loff_t offset, size_t count)
{
        return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct omap_mmu_tlb_lock tlb_lock;
        int ret = -EIO;

        clk_enable(mmu->clk);
        omap_dsp_request_mem();

        down_read(&mmu->exmap_sem);

        omap_mmu_get_tlb_lock(mmu, &tlb_lock);

        if (likely(mmu->ops->show))
                ret = mmu->ops->show(mmu, buf, &tlb_lock);

        /* restore victim entry */
        omap_mmu_set_tlb_lock(mmu, &tlb_lock);

        up_read(&mmu->exmap_sem);
        omap_dsp_release_mem();
        clk_disable(mmu->clk);

        return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        struct exmap_tbl *ent;
        int len;

        down_read(&mmu->exmap_sem);
        len = sprintf(buf, "  dspadr     size         buf     size uc\n");
                         /* 0x300000 0x123000  0xc0171000 0x100000  0*/

        omap_mmu_for_each_tlb_entry(mmu, ent) {
                struct exmap_tbl *member;
                void *vadr;
                unsigned long size;
                enum exmap_type type;
                int idx;

                /* find the head of a link group */
                if (!ent->valid || (ent->link.prev >= 0))
                        continue;

                vadr = ent->vadr;
                type = ent->type;
                size = 0;
                idx = ent - mmu->exmap_tbl;
                do {
                        member = mmu->exmap_tbl + idx;
                        size += PAGE_SIZE << member->order;
                } while ((idx = member->link.next) >= 0);

                len += sprintf(buf + len, "0x%06lx %#8lx",
                               virt_to_omap_mmu(mmu, vadr), size);

                if (type == EXMAP_TYPE_FB) {
                        len += sprintf(buf + len, "    framebuf\n");
                } else {
                        len += sprintf(buf + len, "\n");
                        idx = ent - mmu->exmap_tbl;
                        do {
                                member = mmu->exmap_tbl + idx;
                                len += sprintf(buf + len,
                                               /* 0xc0171000 0x100000  0*/
                                               "%19s0x%8p %#8lx %2d\n",
                                               "", member->buf,
                                               PAGE_SIZE << member->order,
                                               member->usecount);
                        } while ((idx = member->link.next) >= 0);
                }
        }

        up_read(&mmu->exmap_sem);
        return len;
}

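/*
 * Writing "<dspadr> <len>" (in hex) to the exmap attribute adds a
 * mapping; writing "<dspadr>" alone removes it. A hypothetical example,
 * assuming the device ends up under /sys/class/mmu/dsp:
 *
 *      echo "300000 100000" > /sys/class/mmu/dsp/exmap    (map 1MB)
 *      echo "300000" > /sys/class/mmu/dsp/exmap           (unmap)
 */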
static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
                           const char *buf,
                           size_t count)
{
        struct omap_mmu *mmu = dev_get_drvdata(dev);
        unsigned long base = 0, len = 0;
        int ret;

        sscanf(buf, "%lx %lx", &base, &len);

        if (!base)
                return -EINVAL;

        if (len) {
                /* Add the mapping */
                ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
                if (ret < 0)
                        return ret;
        } else {
                /* Remove the mapping */
                ret = omap_mmu_exunmap(mmu, base);
                if (ret < 0)
                        return ret;
        }

        return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);

static ssize_t mempool_show(struct class *class, char *buf)
{
        int min_nr_1M = 0, curr_nr_1M = 0;
        int min_nr_64K = 0, curr_nr_64K = 0;
        int total = 0;

        if (likely(mempool_1M)) {
                min_nr_1M  = mempool_1M->min_nr;
                curr_nr_1M = mempool_1M->curr_nr;
                total += min_nr_1M * SZ_1M;
        }
        if (likely(mempool_64K)) {
                min_nr_64K  = mempool_64K->min_nr;
                curr_nr_64K = mempool_64K->curr_nr;
                total += min_nr_64K * SZ_64K;
        }

        return sprintf(buf,
                       "0x%x\n"
                       "1M  buffer: %d (%d free)\n"
                       "64K buffer: %d (%d free)\n",
                       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
        .name           = "mmu",
        .dev_release    = omap_mmu_class_dev_release,
};

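/*
 * Register an MMU instance: create its class device, allocate the exmap
 * table, bring the hardware up, and expose the mmu/exmap/mem attributes
 * in sysfs. Each error path unwinds exactly the steps that succeeded.
 */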
int omap_mmu_register(struct omap_mmu *mmu)
{
        int ret;

        mmu->dev.class = &omap_mmu_class;
        strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
        dev_set_drvdata(&mmu->dev, mmu);

        mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
                                 GFP_KERNEL);
        if (!mmu->exmap_tbl)
                return -ENOMEM;

        ret = device_register(&mmu->dev);
        if (unlikely(ret))
                goto err_dev_register;

        init_rwsem(&mmu->exmap_sem);

        ret = omap_mmu_read_reg(mmu, MMU_REVISION);
        printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
               mmu->name, (ret >> 4) & 0xf, ret & 0xf);

        ret = omap_mmu_init(mmu);
        if (unlikely(ret))
                goto err_mmu_init;

        ret = device_create_file(&mmu->dev, &dev_attr_mmu);
        if (unlikely(ret))
                goto err_dev_create_mmu;
        ret = device_create_file(&mmu->dev, &dev_attr_exmap);
        if (unlikely(ret))
                goto err_dev_create_exmap;

        if (likely(mmu->membase)) {
                dev_attr_mem.size = mmu->memsize;
                ret = device_create_bin_file(&mmu->dev, &dev_attr_mem);
                if (unlikely(ret))
                        goto err_bin_create_mem;
        }

        return 0;

err_bin_create_mem:
        device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
        device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
        omap_mmu_shutdown(mmu);
err_mmu_init:
        device_unregister(&mmu->dev);
err_dev_register:
        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);

void omap_mmu_unregister(struct omap_mmu *mmu)
{
        omap_mmu_shutdown(mmu);
        omap_mmu_kmem_release();

        device_remove_file(&mmu->dev, &dev_attr_mmu);
        device_remove_file(&mmu->dev, &dev_attr_exmap);

        if (likely(mmu->membase))
                device_remove_bin_file(&mmu->dev, &dev_attr_mem);

        kfree(mmu->exmap_tbl);
        mmu->exmap_tbl = NULL;

        device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
        int ret = class_register(&omap_mmu_class);
        if (!ret)
                ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

        return ret;
}

static void __exit omap_mmu_class_exit(void)
{
        class_remove_file(&omap_mmu_class, &class_attr_mempool);
        class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");