/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 * and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>
#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2 MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU has base and victim implemented in different bits in the LOCK
 * register (the shifts are still the same). All of the other registers
 * are the same on all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT		10
#define MMU_LOCK_VICTIM_SHIFT		4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr, align)	(!((adr) & ((align) - 1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;
#define omap_mmu_for_each_tlb_entry(mmu, entry)				\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),		\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);		\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)
static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}
/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request consecutive 1MB or 64kB allocations,
 * which become difficult once memory pages are fragmented.
 * So the user can reserve such memory blocks in the early phase
 * through kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}
int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is larger than DSP memory space "
		       "size (0x%x).\n", size, (1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size >= SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 20);
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 16);
	}

	return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
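
/*
 * Usage sketch (not from the original sources): a DSP driver could
 * reserve its large exmap() blocks at init time, before physical memory
 * gets fragmented.  The `dsp_mmu' pointer and the sizes below are
 * assumptions chosen for illustration.
 */
#if 0	/* example only */
static int __init example_reserve(struct omap_mmu *dsp_mmu)
{
	/* four 1MB blocks plus eight 64KB blocks: 0x480000 bytes */
	return omap_mmu_kmem_reserve(dsp_mmu, (4 * SZ_1M) + (8 * SZ_64K));
}
#endif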
void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);
static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}
/*
 * ARM MMU operations
 */
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	printk(KERN_DEBUG
	       "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
	       virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);
void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	printk(KERN_DEBUG
	       "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
	       virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);
int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers the whole address range. */
				return 1;
			} else {
				/*
				 * this map covers it partially.
				 * check the remaining portion.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);
/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);
void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
/*
 * omap_mmu_virt_to_phys()
 * returns the physical address, and sets *len to the valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	/* EXRAM */
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
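
/*
 * Usage sketch (not from the original sources): translating a mapped
 * DSP-side buffer into its physical segments, e.g. to build a
 * scatter list.  The DSP address and `total' are illustrative, and the
 * read lock follows the "exmap_sem should be held" rule stated in
 * exmap_valid() above.
 */
#if 0	/* example only */
static void example_walk(struct omap_mmu *mmu, unsigned long dspadr,
			 size_t total)
{
	void *vadr = omap_mmu_to_virt(mmu, dspadr);

	down_read(&mmu->exmap_sem);
	while (total > 0) {
		size_t len;
		unsigned long phys = omap_mmu_virt_to_phys(mmu, vadr, &len);

		if (phys == 0)		/* valid mapping not found */
			break;
		if (len > total)
			len = total;
		/* ... use the segment [phys, phys + len) here ... */
		vadr += len;
		total -= len;
	}
	up_read(&mmu->exmap_sem);
}
#endif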
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}
static void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
	       CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
	       CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}
static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}
void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set the victim to the entry to be read */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);
void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}
int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			break;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

	/* The last entry cannot be locked? */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		printk(KERN_ERR "MMU: TLB is full.\n");
		return -EBUSY;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr))
		return PTR_ERR(cr);

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
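
/*
 * Usage sketch (not from the original sources): pinning a single 4KB
 * translation, the same way omap_mmu_exmap() below loads its entries.
 * The addresses passed in are illustrative.
 */
#if 0	/* example only */
static int example_pin_page(struct omap_mmu *mmu, unsigned long dspadr,
			    unsigned long padr)
{
	struct omap_mmu_tlb_entry ent;

	INIT_TLB_ENTRY(&ent, dspadr, padr, OMAP_MMU_CAM_PAGESIZE_4KB);
	return omap_mmu_load_tlb_entry(mmu, &ent);
}
#endif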
static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}
int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = -1;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);
static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
}
/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
 * In this case, the buffer for the DSP is allocated within this routine,
 * then it is mapped.
 * On the other hand, a caller such as frame buffer sharing calls this
 * function with padr set. It means some known address space pointed to
 * by padr is going to be shared with the DSP.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _dspadr = dspadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, dspadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
		return -EINVAL;
	}
	if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: physical address(0x%lx) is not aligned.\n",
		       padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((dspadr < mmu->memsize) ||
	    (dspadr >= (1 << mmu->addrspace))) {
		printk(KERN_ERR
		       "MMU: illegal address/size for %s().\n",
		       __FUNCTION__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			printk(KERN_ERR "MMU: exmap page overlap!\n");
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines? */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	printk(KERN_ERR "MMU: DSP TLB is full.\n");
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_dspadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_dspadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);

		if (!buf) {
			status = -ENOMEM;
			goto fail;
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access the allocated memory through 'buf',
	 * since this area must not be cached.
	 */
	status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading DSP TLB entry */
	INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
	status = omap_mmu_load_tlb_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu((unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_dspadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, dspadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
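
/*
 * Usage sketch (not from the original sources): an anonymous 1MB
 * mapping at DSP address 0x300000, torn down again with exunmap.
 * Addresses and sizes are illustrative only.
 */
#if 0	/* example only */
static int example_map(struct omap_mmu *mmu)
{
	int ret;

	/* padr == 0: the kernel buffer is allocated inside exmap() */
	ret = omap_mmu_exmap(mmu, 0x300000, 0, SZ_1M, EXMAP_TYPE_MEM);
	if (ret < 0)
		return ret;

	/* ... let the DSP use the area ... */

	return omap_mmu_exunmap(mmu, 0x300000);
}
#endif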
static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu((unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		printk(KERN_DEBUG
		       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
		       size, ent->buf);
	}

	ent->valid = 0;
	return size;
}
int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, dspadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	printk(KERN_WARNING
	       "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "MMU: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing DSP TLB entry */
	omap_mmu_clear_tlb_entry(mmu, dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	total += size;

	/* we don't free PTEs */
	/* flush TLB */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	dspadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	printk(KERN_ERR
	       "MMU: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);
void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entries */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);
void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long dspadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, dspadr);
	exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
	omap_mmu_load_tlb_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *virt = omap_mmu_to_virt(mmu, dspadr);

	exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
	/* the DSP MMU is shutting down; its entry is not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);
static void omap_mmu_reset(struct omap_mmu *mmu)
{
	int i;

	omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
			break;
}
void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	if (reset)
		omap_mmu_reset(mmu);

	omap_mmu_write_reg(mmu, 0x2, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);
static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}
static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();
	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		printk(KERN_ERR
		       "failed to register MMU interrupt: %d\n", ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */
	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);
fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}
static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}
/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}
static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}
static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
				 loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}
static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}
static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}
static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
				  loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}
static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};
/* To be obsoleted; kept for backward compatibility */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
			    loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
			     loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);
/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = -EIO;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);
static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	int len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  dspadr     size         buf     size uc\n");
			 /* 0x300000 0x123000  0xc0171000 0x100000  0*/

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx, head;

		/* find a top of link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		size = 0;
		head = idx = ent - mmu->exmap_tbl;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB)
			len += sprintf(buf + len, "    framebuf\n");
		else
			len += sprintf(buf + len, "\n");

		/* walk the chain again for the per-buffer lines */
		idx = head;
		do {
			ent = mmu->exmap_tbl + idx;
			len += sprintf(buf + len,
				    /* 0xc0171000 0x100000  0*/
				       "%19s0x%8p %#8lx %2d\n",
				       "", ent->buf,
				       PAGE_SIZE << ent->order,
				       ent->usecount);
		} while ((idx = ent->link.next) >= 0);
	}

	up_read(&mmu->exmap_sem);
	return len;
}
static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf,
			   size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
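
/*
 * Usage sketch for the exmap attribute (not from the original sources;
 * the exact sysfs path depends on the class and device names used at
 * registration time):
 *
 *	# map 1MB of anonymous memory at DSP address 0x300000
 *	echo "300000 100000" > /sys/class/mmu/<name>/exmap
 *	# unmap it again (len == 0 means remove)
 *	echo "300000 0" > /sys/class/mmu/<name>/exmap
 */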
static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M  = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K  = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "total: %d bytes\n"
		       "1M buffer:  %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
	.name		= "mmu",
	.dev_release	= omap_mmu_class_dev_release,
};
int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev.class = &omap_mmu_class;
	strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
	dev_set_drvdata(&mmu->dev, mmu);

	mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		return -ENOMEM;

	ret = device_register(&mmu->dev);
	if (unlikely(ret))
		goto err_dev_register;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_read_reg(mmu, MMU_REVISION);
	printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
	       mmu->name, (ret >> 4) & 0xf, ret & 0xf);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(&mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(&mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(&mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	device_unregister(&mmu->dev);
err_dev_register:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);
void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(&mmu->dev, &dev_attr_mmu);
	device_remove_file(&mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(&mmu->dev,
				       &dev_attr_mem);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);
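
/*
 * Registration sketch (not from the original sources): an arch-specific
 * driver fills in struct omap_mmu and registers it.  Every field value
 * below, the OMAP_MMU_DSP enum value, and the `example_mmu_ops'
 * structure are assumptions made for illustration.
 */
#if 0	/* example only */
static struct omap_mmu example_mmu = {
	.name		= "dsp",
	.type		= OMAP_MMU_DSP,		/* assumed enum value */
	.ops		= &example_mmu_ops,	/* arch-specific callbacks */
	.nr_tlb_entries	= 32,
	.addrspace	= 24,			/* 16MB of DSP address space */
};

static int __init example_register(void)
{
	return omap_mmu_register(&example_mmu);
}
#endif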
static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");