/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, the MMU_LOCK_xxx_MASK definitions only apply to the IVA and
 * DSP MMUs; the camera MMU implements the base and victim fields in
 * different bits of the LOCK register (the shifts are still the same).
 * All of the other registers are the same across all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT		10
#define MMU_LOCK_VICTIM_SHIFT		4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)
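
/*
 * For illustration only (not used by this file): a camera-MMU LOCK
 * value of 0x00000c10 decodes with the masks above as
 *
 *	base   = (0x00000c10 & CAMERA_MMU_LOCK_BASE_MASK)
 *			>> MMU_LOCK_BASE_SHIFT;		--> 3
 *	victim = (0x00000c10 & CAMERA_MMU_LOCK_VICTIM_MASK)
 *			>> MMU_LOCK_VICTIM_SHIFT;	--> 1
 *
 * i.e. the first 3 TLB entries are locked and entry 1 is the next
 * victim.  The IVA/DSP variants use the same shifts with the wider
 * MMU_LOCK_{BASE,VICTIM}_MASK values from the mach-level mmu.h.
 */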

#define is_aligned(adr, align)	(!((adr) & ((align) - 1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE	(1 << 3)
#define MMU_CNTL_TWLENABLE	(1 << 2)
#define MMU_CNTL_MMUENABLE	(1 << 1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)			\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),	\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);	\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)

static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request consecutive 1MB or 64kB blocks, which are
 * hard to come by once memory pages have become fragmented.  Users
 * can therefore reserve such memory blocks in the early boot phase
 * through kmem_reserve().
 */
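
/*
 * Illustrative (hypothetical) usage from early init code -- not part
 * of this file.  The size argument must be a multiple of 64KB; whole
 * 1MB portions go to the 1MB pool and the remainder, in 64KB units,
 * to the 64KB pool:
 *
 *	// pre-reserve 2 x 1MB and 4 x 64KB blocks
 *	omap_mmu_kmem_reserve(mmu, 2 * SZ_1M + 4 * SZ_64K);
 */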

static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is larger than DSP memory space "
		       "size (0x%x).\n", size, (1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size >= SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 20);
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 16);
	}

	if (size)
		len -= size;

	return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);

void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	printk(KERN_DEBUG
	       "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
	       virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	printk(KERN_DEBUG
	       "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
	       virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers the whole range. */
				return 1;
			} else {
				/*
				 * this map covers the range only
				 * partially; check the rest.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * While the usecount is > 0, that area can't be released.
 */
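
/*
 * Illustrative (hypothetical) pairing in a driver's mmap path -- not
 * part of this file: bump the count when the VMA is created and drop
 * it when the VMA goes away, so that omap_mmu_exunmap() refuses to
 * free a region still mapped into user space:
 *
 *	omap_mmu_exmap_use(mmu, vadr, len);	// e.g. in vma open()
 *	...
 *	omap_mmu_exmap_unuse(mmu, vadr, len);	// e.g. in vma close()
 */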

void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns the physical address, and sets *len to the valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	/* EXRAM */
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
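
/*
 * Illustrative (hypothetical) usage -- not part of this file: walk a
 * DSP buffer that may span several exmap entries, chunk by chunk:
 *
 *	size_t left = total, valid;
 *	while (left) {
 *		unsigned long pa = omap_mmu_virt_to_phys(mmu, vadr, &valid);
 *		if (!pa)
 *			break;		// no valid mapping here
 *		if (valid > left)
 *			valid = left;
 *		...			// use 'pa' for 'valid' bytes
 *		vadr += valid;
 *		left -= valid;
 *	}
 */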

/*
 * DSP PTE operations
 */
static void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
		       unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
	if (virt & (1 << SECTION_SHIFT))
		pmdp++;
	*pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
	flush_pmd_entry(pmdp);
}

static void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
			    unsigned long phys, int prot)
{
	int i;
	for (i = 0; i < 16; i += 1) {
		omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}

static int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
		    unsigned long phys, pgprot_t prot)
{
	pte_t *ptep;
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (!(prot & PTE_TYPE_MASK))
		prot |= PTE_TYPE_SMALL;

	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(mm, virt);
		if (ptep == NULL)
			return -ENOMEM;
		pmd_populate_kernel(mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, virt);
	ptep -= PTRS_PER_PTE;
	*ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
	flush_pmd_entry((pmd_t *)ptep);

	return 0;
}

static int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
			 unsigned long phys, pgprot_t prot)
{
	int i, ret;
	for (i = 0; i < 16; i += 1) {
		ret = omap_mmu_alloc_page(mm, virt, phys,
					  prot | PTE_TYPE_LARGE);
		if (ret)
			return -ENOMEM; /* only 1st time */
		virt += PAGE_SIZE;
	}

	return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
			     struct omap_mmu_tlb_entry *e)
{
	int ret = 0;
	struct mm_struct *mm = mmu->twl_mm;
	const unsigned long va = e->va;
	const unsigned long pa = e->pa;
	const pgprot_t prot = mmu->ops->pte_get_attr(e);

	spin_lock(&mm->page_table_lock);

	switch (e->pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_16MB:
		omap_mmu_alloc_supersection(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		omap_mmu_alloc_section(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		ret = omap_mmu_alloc_page(mm, va, pa, prot);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
	pte_t *ptep, *end;
	pmd_t *pmdp;
	struct mm_struct *mm = mmu->twl_mm;

	spin_lock(&mm->page_table_lock);

	pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (pmd_none(*pmdp))
		goto out;

	if (!pmd_table(*pmdp))
		goto invalidate_pmd;

	ptep = pte_offset_kernel(pmdp, virt);
	pte_clear(mm, virt, ptep);
	flush_pmd_entry((pmd_t *)ptep);

	/* zap pte */
	end = pmd_page_vaddr(*pmdp);
	ptep = end - PTRS_PER_PTE;
	while (ptep < end) {
		if (!pte_none(*ptep))
			goto out;
		ptep++;
	}
	pte_free_kernel(pmd_page_vaddr(*pmdp));

invalidate_pmd:
	pmd_clear(pmdp);
	flush_pmd_entry(pmdp);
out:
	spin_unlock(&mm->page_table_lock);
}

/*
 * TLB operations
 */
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}

static void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set victim */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	/* search a free (invalid) TLB entry */
	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			break;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

	/* The last entry cannot be locked? */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		printk(KERN_ERR "MMU: TLB is full.\n");
		return -EBUSY;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr))
		return PTR_ERR(cr);

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = -1;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	int ret = -1;
	if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
		/*XXX use PG_flag for prsvd */
		ret = omap_mmu_load_pte(mmu, entry);
		if (ret)
			return ret;
	}
	if (entry->tlb)
		ret = omap_mmu_load_tlb_entry(mmu, entry);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
	if (ret)
		return ret;
	if (mmu->ops->pte_get_attr)
		omap_mmu_clear_pte(mmu, vadr);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.  In that
 * case, the buffer for the DSP is allocated inside this routine and
 * then mapped.  Callers such as frame buffer sharing, on the other
 * hand, call this function with padr set; the known address space
 * pointed to by padr is then shared with the DSP.
 */
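
/*
 * Illustrative (hypothetical) calls -- not part of this file:
 *
 *	// let the framework allocate and map 1MB of kernel memory
 *	// at DSP address 0x300000:
 *	omap_mmu_exmap(mmu, 0x300000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *
 *	// share an already-known physical region (e.g. a frame
 *	// buffer at fb_paddr, size fb_size) with the DSP instead:
 *	omap_mmu_exmap(mmu, 0x400000, fb_paddr, fb_size, EXMAP_TYPE_FB);
 */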

int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _dspadr = dspadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, dspadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K

	/* alignment check */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
		return -EINVAL;
	}
	if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: physical address(0x%lx) is not aligned.\n",
		       padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((dspadr < mmu->memsize) ||
	    (dspadr >= (1 << mmu->addrspace))) {
		printk(KERN_ERR
		       "MMU: illegal address/size for %s().\n",
		       __FUNCTION__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			printk(KERN_ERR "MMU: exmap page overlap!\n");
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines?  */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	printk(KERN_ERR "MMU: DSP TLB is full.\n");
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_dspadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_dspadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
		if (buf == NULL) {
			status = -ENOMEM;
			goto fail;
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access the allocated memory through 'buf',
	 * since this area must not be cached.
	 */
	status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading DSP PTE entry */
	INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
	status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu((unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_dspadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, dspadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);

static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu((unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		printk(KERN_DEBUG
		       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
		       size, ent->buf);
	}

	ent->valid = 0;
	return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, dspadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	printk(KERN_WARNING
	       "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "MMU: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EBUSY;
	}
	/* clearing DSP PTE entry */
	omap_mmu_clear_pte_entry(mmu, dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	total += size;

	/* we don't free PTEs */

	/* flush TLB */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	dspadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	printk(KERN_ERR
	       "MMU: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entry */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long dspadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, dspadr);
	exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
	omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *virt = omap_mmu_to_virt(mmu, dspadr);

	exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
	/* the DSP MMU is shutting down; not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
	int i;

	omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
			break;
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	u32 val = MMU_CNTL_MMUENABLE;
	u32 pa = (u32)virt_to_phys(mmu->twl_mm->pgd);

	if (likely(reset))
		omap_mmu_reset(mmu);

	if (mmu->ops->pte_get_attr) {
		omap_mmu_write_reg(mmu, pa, MMU_TTB);
		val |= MMU_CNTL_TWLENABLE;
	}

	omap_mmu_write_reg(mmu, val, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();
	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		printk(KERN_ERR
		       "failed to register MMU interrupt: %d\n", ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */
	udelay(100);
	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);
fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
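
/*
 * Illustrative (hypothetical) bracketing of a DSP memory access --
 * not part of this file; omap_mmu_mem_read() below follows the same
 * pattern:
 *
 *	if (omap_mmu_mem_enable(mmu, vadr) < 0)
 *		return -EBUSY;
 *	...			// access the memory behind 'vadr'
 *	omap_mmu_mem_disable(mmu, vadr);
 */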

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
				 loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
				  loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};

/* Kept for backward compatibility; to be obsoleted. */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
			    loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
			     loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = -EIO;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	int len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  dspadr     size         buf     size uc\n");
			 /* 0x300000 0x123000  0xc0171000 0x100000  0*/
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx, i;

		/* find a top of link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		size = 0;
		i = idx = ent - mmu->exmap_tbl;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
			continue;
		}
		len += sprintf(buf + len, "\n");

		idx = i;
		do {
			ent = mmu->exmap_tbl + idx;
			len += sprintf(buf + len,
				       /* 0xc0171000 0x100000  0*/
				       "%19s0x%8p %#8lx %2d\n",
				       "", ent->buf,
				       PAGE_SIZE << ent->order,
				       ent->usecount);
		} while ((idx = ent->link.next) >= 0);
	}

	up_read(&mmu->exmap_sem);
	return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
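
/*
 * Illustrative (hypothetical) use of the "exmap" attribute from user
 * space -- the exact device path depends on the registered MMU name:
 *
 *	# map 1MB of memory at DSP address 0x300000
 *	echo "300000 100000" > /sys/class/mmu/dsp/exmap
 *	# unmap it again (a zero length removes the mapping)
 *	echo "300000 0" > /sys/class/mmu/dsp/exmap
 */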

static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M  = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K  = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "0x%x\n"
		       "1M  buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
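
/*
 * Illustrative (hypothetical) usage: the attribute above appears as a
 * read-only "mempool" file under the MMU class directory in sysfs,
 * e.g. (the path depends on the class name):
 *
 *	$ cat /sys/class/mmu/mempool
 *
 * which prints the total reserved bytes and the per-pool counts
 * computed in mempool_show().
 */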

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
	.name		= "mmu",
	.dev_release	= omap_mmu_class_dev_release,
};

int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev.class = &omap_mmu_class;
	strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
	dev_set_drvdata(&mmu->dev, mmu);

	mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		return -ENOMEM;

	if (mmu->ops->pte_get_attr) {
		struct mm_struct *mm = mm_alloc();
		if (!mm) {
			ret = -ENOMEM;
			goto err_mm_alloc;
		}
		mmu->twl_mm = mm;
	}

	ret = device_register(&mmu->dev);
	if (unlikely(ret))
		goto err_dev_register;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_read_reg(mmu, MMU_REVISION);
	printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
	       mmu->name, (ret >> 4) & 0xf, ret & 0xf);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(&mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(&mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(&mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	device_unregister(&mmu->dev);
err_dev_register:
	if (mmu->ops->pte_get_attr)
		__mmdrop(mmu->twl_mm);
err_mm_alloc:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);

void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(&mmu->dev, &dev_attr_mmu);
	device_remove_file(&mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(&mmu->dev,
				       &dev_attr_mem);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	if (mmu->ops->pte_get_attr) {
		if (mmu->twl_mm) {
			__mmdrop(mmu->twl_mm);
			mmu->twl_mm = NULL;
		}
	}

	device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");