2 * linux/arch/arm/plat-omap/mmu.c
4 * OMAP MMU management framework
6 * Copyright (C) 2002-2006 Nokia Corporation
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9 * and Paul Mundt <lethal@linux-sh.org>
11 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 #include <linux/module.h>
28 #include <linux/mempool.h>
29 #include <linux/init.h>
30 #include <linux/delay.h>
31 #include <linux/err.h>
32 #include <linux/clk.h>
33 #include <linux/device.h>
34 #include <linux/interrupt.h>
35 #include <asm/uaccess.h>
37 #include <asm/pgalloc.h>
38 #include <asm/pgtable.h>
39 #include <asm/arch/mmu.h>
40 #include <asm/sizes.h>
42 #if defined(CONFIG_ARCH_OMAP1)
43 #include "../mach-omap1/mmu.h"
44 #elif defined(CONFIG_ARCH_OMAP2)
45 #include "../mach-omap2/mmu.h"
49 * On OMAP2 MMU_LOCK_xxx_MASK only applies to the IVA and DSP, the camera
50 * MMU has base and victim implemented in different bits in the LOCK
51 * register (shifts are still the same), all of the other registers are
52 * the same on all of the MMUs.
/* Bit positions of the base/victim fields in the MMU LOCK register. */
54 #define MMU_LOCK_BASE_SHIFT 10
55 #define MMU_LOCK_VICTIM_SHIFT 4
/* Camera MMU base/victim fields are only 3 bits wide (same shifts). */
57 #define CAMERA_MMU_LOCK_BASE_MASK (0x7 << MMU_LOCK_BASE_SHIFT)
58 #define CAMERA_MMU_LOCK_VICTIM_MASK (0x7 << MMU_LOCK_VICTIM_SHIFT)
/* True when adr is a multiple of align; align must be a power of two. */
60 #define is_aligned(adr,align) (!((adr)&((align)-1)))
/* Page-allocation orders for the three DSP mapping unit sizes. */
61 #define ORDER_1MB (20 - PAGE_SHIFT)
62 #define ORDER_64KB (16 - PAGE_SHIFT)
63 #define ORDER_4KB (12 - PAGE_SHIFT)
/* Control bits written to the MMU CNTL register. */
65 #define MMU_CNTL_EMUTLBUPDATE (1<<3)
66 #define MMU_CNTL_TWLENABLE (1<<2)
67 #define MMU_CNTL_MMUENABLE (1<<1)
/* Pre-reserved pools of 1MB / 64KB buffers, filled by kmem_reserve(). */
69 static mempool_t *mempool_1M;
70 static mempool_t *mempool_64K;
/* Walk every slot of mmu->exmap_tbl, prefetching the next entry. */
72 #define omap_mmu_for_each_tlb_entry(mmu, entry) \
73 for (entry = mmu->exmap_tbl; prefetch(entry + 1), \
74 entry < (mmu->exmap_tbl + mmu->nr_tlb_entries); \
/* Recover the struct device that embeds the given kobject. */
77 #define to_dev(obj) container_of(obj, struct device, kobj)
/*
 * mempool_alloc_from_pool - take an element straight from the pool's
 * reserved stock when one is available, otherwise fall back to the
 * regular mempool_alloc() path.
 * NOTE(review): reaches into mempool internals (pool->lock,
 * pool->elements) rather than using only the public mempool API.
 */
79 static void *mempool_alloc_from_pool(mempool_t *pool,
80 unsigned int __nocast gfp_mask)
82 spin_lock_irq(&pool->lock);
83 if (likely(pool->curr_nr)) {
84 void *element = pool->elements[--pool->curr_nr];
85 spin_unlock_irq(&pool->lock);
89 spin_unlock_irq(&pool->lock);
90 return mempool_alloc(pool, gfp_mask);
94 * kmem_reserve(), kmem_release():
95 * reserve or release kernel memory for exmap().
97 * exmap() might request consecutive 1MB or 64kB,
98 * but it will be difficult after memory pages are fragmented.
99 * So, user can reserve such memory blocks in the early phase
100 * through kmem_reserve().
/* mempool element constructor: DMA-capable pages of the given order
 * (order is smuggled through the opaque pool-data pointer). */
102 static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
104 return (void *)__get_dma_pages(gfp, (unsigned int)order);
/* mempool element destructor: free the pages allocated above. */
107 static void omap_mmu_pool_free(void *buf, void *order)
109 free_pages((unsigned long)buf, (unsigned int)order);
/*
 * omap_mmu_kmem_reserve - pre-reserve 1MB/64KB blocks for later exmap()
 * use, before system memory gets fragmented.
 * @size must be a multiple of 64KB and fit the MMU's address space.
 * Creates or grows mempool_1M and mempool_64K accordingly.
 * NOTE(review): the error-return paths are not visible here; the printk
 * format string below lacks a closing ')' after the size value.
 */
112 int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
114 unsigned long len = size;
116 /* alignment check */
117 if (!is_aligned(size, SZ_64K)) {
119 "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
123 if (size > (1 << mmu->addrspace)) {
125 "omapdsp: size(0x%lx) is larger than DSP memory space "
126 "size (0x%x.\n", size, (1 << mmu->addrspace));
133 if (likely(!mempool_1M))
134 mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
/* grow an already-existing 1MB pool instead of recreating it */
138 mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
/* strip the 1MB-sized part that is now reserved */
141 size &= ~(0xf << 20);
144 if (size >= SZ_64K) {
147 if (likely(!mempool_64K))
148 mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
152 mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
155 size &= ~(0xf << 16);
163 EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
/* omap_mmu_kmem_release - destroy both reserved-memory pools. */
165 void omap_mmu_kmem_release(void)
168 mempool_destroy(mempool_64K);
173 mempool_destroy(mempool_1M);
177 EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);
/*
 * omap_mmu_free_pages - undo an exmap buffer allocation: clear the
 * PG_reserved marking set at map time, then return the pages either to
 * the matching mempool (64KB/1MB orders) or to the page allocator.
 */
179 static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
181 struct page *page, *ps, *pe;
183 ps = virt_to_page(buf);
184 pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
186 for (page = ps; page < pe; page++)
187 ClearPageReserved(page);
/* prefer the reserved pools so they stay replenished */
189 if ((order == ORDER_64KB) && likely(mempool_64K))
190 mempool_free((void *)buf, mempool_64K);
191 else if ((order == ORDER_1MB) && likely(mempool_1M))
192 mempool_free((void *)buf, mempool_1M);
194 free_pages(buf, order);
/*
 * exmap_set_armmmu - create ARM-side kernel mappings (4KB PTEs) for the
 * region the DSP MMU maps, so the kernel can access the same memory.
 * @virt: kernel virtual start, @phys: physical start, @size: bytes.
 * Allocates a PTE table on demand when the PMD is empty.
 */
200 int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
203 unsigned long sz_left;
206 int prot_pmd, prot_pte;
209 "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
212 prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
213 prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;
215 pmdp = pmd_offset(pgd_offset_k(virt), virt);
216 if (pmd_none(*pmdp)) {
217 ptep = pte_alloc_one_kernel(&init_mm, 0);
220 /* note: two PMDs will be set */
221 pmd_populate_kernel(&init_mm, pmdp, ptep);
226 sz_left >= PAGE_SIZE;
227 sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
228 ptep = pte_offset_kernel(pmdp, virt);
/* NOTE(review): 'off' is presumably phys - virt, set on a dropped line */
229 set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
236 EXPORT_SYMBOL_GPL(exmap_set_armmmu);
/*
 * exmap_clear_armmmu - remove the ARM-side 4KB PTE mappings previously
 * installed by exmap_set_armmmu() for [virt, virt + size).
 */
238 void exmap_clear_armmmu(unsigned long virt, unsigned long size)
240 unsigned long sz_left;
245 "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
249 sz_left >= PAGE_SIZE;
250 sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
251 pmdp = pmd_offset(pgd_offset_k(virt), virt);
252 ptep = pte_offset_kernel(pmdp, virt);
253 pte_clear(&init_mm, virt, ptep);
258 EXPORT_SYMBOL_GPL(exmap_clear_armmmu);
/*
 * exmap_valid - check whether [vadr, vadr + len) is completely covered
 * by existing exmap mappings; partial coverage advances vadr/len and
 * keeps searching. Caller must hold exmap_sem.
 */
260 int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
262 /* exmap_sem should be held before calling this function */
263 struct exmap_tbl *ent;
266 omap_mmu_for_each_tlb_entry(mmu, ent) {
268 unsigned long mapsize;
272 mapadr = (void *)ent->vadr;
273 mapsize = 1 << (ent->order + PAGE_SHIFT);
274 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
275 if (vadr + len <= mapadr + mapsize) {
276 /* this map covers whole address. */
280 * this map covers partially.
281 * check rest portion.
283 len -= mapadr + mapsize - vadr;
284 vadr = mapadr + mapsize;
292 EXPORT_SYMBOL_GPL(exmap_valid);
295 * omap_mmu_exmap_use(), unuse():
296 * when the mapped area is exported to user space with mmap,
297 * the usecount is incremented.
298 * while the usecount > 0, that area can't be released.
/*
 * omap_mmu_exmap_use - bump the usecount of every exmap entry that
 * overlaps [vadr, vadr + len); used when the area is mmap'ed to user
 * space so it cannot be released while in use.
 */
300 void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
302 struct exmap_tbl *ent;
304 down_write(&mmu->exmap_sem);
305 omap_mmu_for_each_tlb_entry(mmu, ent) {
307 unsigned long mapsize;
311 mapadr = (void *)ent->vadr;
312 mapsize = 1 << (ent->order + PAGE_SHIFT);
/* overlap test: [vadr, vadr+len) intersects [mapadr, mapadr+mapsize) */
313 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
316 up_write(&mmu->exmap_sem);
318 EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);
/*
 * omap_mmu_exmap_unuse - counterpart of omap_mmu_exmap_use(): drop the
 * usecount of every exmap entry overlapping [vadr, vadr + len).
 */
320 void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
322 struct exmap_tbl *ent;
324 down_write(&mmu->exmap_sem);
325 omap_mmu_for_each_tlb_entry(mmu, ent) {
327 unsigned long mapsize;
331 mapadr = (void *)ent->vadr;
332 mapsize = 1 << (ent->order + PAGE_SHIFT);
333 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
336 up_write(&mmu->exmap_sem);
338 EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
341 * omap_mmu_virt_to_phys()
342 * returns physical address, and sets len to valid length
/*
 * omap_mmu_virt_to_phys - translate a DSP-side kernel virtual address
 * to its physical address; *len is set to the bytes remaining valid
 * from vadr within the containing mapping. Internal DSP memory is
 * handled first, then the exmap table is searched.
 */
345 omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
347 struct exmap_tbl *ent;
349 if (omap_mmu_internal_memory(mmu, vadr)) {
350 unsigned long addr = (unsigned long)vadr;
351 *len = mmu->membase + mmu->memsize - addr;
356 omap_mmu_for_each_tlb_entry(mmu, ent) {
358 unsigned long mapsize;
362 mapadr = (void *)ent->vadr;
363 mapsize = 1 << (ent->order + PAGE_SHIFT);
364 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
365 *len = mapadr + mapsize - vadr;
366 return __pa(ent->buf) + vadr - mapadr;
370 /* valid mapping not found */
373 EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
/*
 * omap_mmu_alloc_section - install a 1MB section entry in the TWL page
 * table for the DSP; virt must be section-aligned (checked below).
 */
379 omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
380 unsigned long phys, int prot)
382 pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
383 if (virt & (1 << SECTION_SHIFT))
385 *pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
386 flush_pmd_entry(pmdp);
/*
 * omap_mmu_alloc_supersection - install a 16MB supersection as 16
 * replicated section entries, as the ARM architecture requires.
 */
390 omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
391 unsigned long phys, int prot)
394 for (i = 0; i < 16; i += 1) {
395 omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
396 virt += (PGDIR_SIZE / 2);
/*
 * omap_mmu_alloc_page - install one 4KB small-page PTE in the TWL page
 * table, allocating the PTE table when the PMD is empty.
 * NOTE(review): the PTRS_PER_PTE adjustment addresses the hardware
 * (non-Linux) half of the ARM PTE table — confirm against mach headers.
 */
401 omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
402 unsigned long phys, pgprot_t prot)
405 pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
407 if (!(prot & PTE_TYPE_MASK))
408 prot |= PTE_TYPE_SMALL;
410 if (pmd_none(*pmdp)) {
411 ptep = pte_alloc_one_kernel(mm, virt);
414 pmd_populate_kernel(mm, pmdp, ptep);
416 ptep = pte_offset_kernel(pmdp, virt);
417 ptep -= PTRS_PER_PTE;
418 *ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
419 flush_pmd_entry((pmd_t *)ptep);
/*
 * omap_mmu_alloc_largepage - install a 64KB large page as 16 replicated
 * 4KB entries tagged PTE_TYPE_LARGE; only the first failure matters
 * since later iterations reuse the already-populated table.
 */
424 omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
425 unsigned long phys, pgprot_t prot)
428 for (i = 0; i < 16; i += 1) {
429 ret = omap_mmu_alloc_page(mm, virt, phys,
430 prot | PTE_TYPE_LARGE);
432 return -ENOMEM; /* only 1st time */
/*
 * omap_mmu_load_pte - write the page-table entry for one TLB entry into
 * the table-walk mm, dispatching on the entry's page size. Holds the
 * mm's page_table_lock across the update.
 */
438 static int omap_mmu_load_pte(struct omap_mmu *mmu,
439 struct omap_mmu_tlb_entry *e)
442 struct mm_struct *mm = mmu->twl_mm;
443 const unsigned long va = e->va;
444 const unsigned long pa = e->pa;
445 const pgprot_t prot = mmu->ops->pte_get_attr(e);
447 spin_lock(&mm->page_table_lock);
450 case OMAP_MMU_CAM_PAGESIZE_16MB:
451 omap_mmu_alloc_supersection(mm, va, pa, prot);
453 case OMAP_MMU_CAM_PAGESIZE_1MB:
454 omap_mmu_alloc_section(mm, va, pa, prot);
456 case OMAP_MMU_CAM_PAGESIZE_64KB:
457 ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
459 case OMAP_MMU_CAM_PAGESIZE_4KB:
460 ret = omap_mmu_alloc_page(mm, va, pa, prot);
467 spin_unlock(&mm->page_table_lock);
/*
 * omap_mmu_clear_pte - remove the TWL page-table entry covering virt.
 * For a table-backed PMD the PTE is cleared and, if the whole PTE table
 * has become empty, the table itself is freed; otherwise the PMD
 * (section/supersection) is cleared directly.
 */
472 static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
476 struct mm_struct *mm = mmu->twl_mm;
478 spin_lock(&mm->page_table_lock);
480 pmdp = pmd_offset(pgd_offset(mm, virt), virt);
485 if (!pmd_table(*pmdp))
488 ptep = pte_offset_kernel(pmdp, virt);
489 pte_clear(mm, virt, ptep);
490 flush_pmd_entry((pmd_t *)ptep);
/* scan the table: free it only when every PTE is now clear */
493 end = pmd_page_vaddr(*pmdp);
494 ptep = end - PTRS_PER_PTE;
496 if (!pte_none(*ptep))
500 pte_free_kernel(pmd_page_vaddr(*pmdp));
504 flush_pmd_entry(pmdp);
506 spin_unlock(&mm->page_table_lock);
/* Thin dispatch to the variant-specific CAM/RAM register-set builder. */
512 static struct cam_ram_regset *
513 omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
515 return mmu->ops->cam_ram_alloc(entry);
/* Thin dispatch: is this CAM/RAM register set a valid TLB entry? */
518 static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
519 struct cam_ram_regset *cr)
521 return mmu->ops->cam_ram_valid(cr);
/*
 * omap_mmu_get_tlb_lock - read the LOCK register and decode the base
 * and victim fields, using the narrower camera-MMU masks when needed.
 */
525 omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
527 unsigned long lock = omap_mmu_read_reg(mmu, OMAP_MMU_LOCK);
530 mask = (mmu->type == OMAP_MMU_CAMERA) ?
531 CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
532 tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;
534 mask = (mmu->type == OMAP_MMU_CAMERA) ?
535 CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
536 tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
/* Encode base/victim back into the LOCK register. */
540 omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
542 omap_mmu_write_reg(mmu,
543 (lock->base << MMU_LOCK_BASE_SHIFT) |
544 (lock->victim << MMU_LOCK_VICTIM_SHIFT),
/* Flush the TLB entry currently addressed by the victim pointer. */
548 static inline void omap_mmu_flush(struct omap_mmu *mmu)
550 omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_FLUSH_ENTRY);
/* Load the CAM/RAM latches into the TLB slot addressed by the victim. */
553 static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
555 omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_LD_TLB);
/*
 * omap_mmu_read_tlb - point the victim at the slot described by @lock,
 * then read that TLB entry's CAM/RAM contents via the variant hook.
 */
558 void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
559 struct cam_ram_regset *cr)
562 omap_mmu_set_tlb_lock(mmu, lock);
564 if (likely(mmu->ops->read_tlb))
565 mmu->ops->read_tlb(mmu, cr);
567 EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);
/*
 * omap_mmu_load_tlb - write the CAM/RAM registers via the variant hook,
 * then flush the victim slot and load the new entry into it.
 */
569 void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
571 if (likely(mmu->ops->load_tlb))
572 mmu->ops->load_tlb(mmu, cr);
574 /* flush the entry */
577 /* load a TLB entry */
/*
 * omap_mmu_load_tlb_entry - load @entry into a locked TLB slot.
 * Scans the slots below the current lock base for an invalid one to
 * reuse; fails with "TLB is full" when every slot is taken. The MMU
 * clock and DSP memory are held enabled for the duration.
 */
581 int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
582 struct omap_mmu_tlb_entry *entry)
584 struct omap_mmu_tlb_lock lock;
585 struct cam_ram_regset *cr;
587 clk_enable(mmu->clk);
588 omap_dsp_request_mem();
590 omap_mmu_get_tlb_lock(mmu, &lock);
591 for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
592 struct cam_ram_regset tmp;
594 /* read a TLB entry */
595 omap_mmu_read_tlb(mmu, &lock, &tmp);
596 if (!omap_mmu_cam_ram_valid(mmu, &tmp))
599 omap_mmu_set_tlb_lock(mmu, &lock);
602 /* The last entry cannot be locked? */
603 if (lock.victim == (mmu->nr_tlb_entries - 1)) {
604 printk(KERN_ERR "MMU: TLB is full.\n");
608 cr = omap_mmu_cam_ram_alloc(mmu, entry);
612 omap_mmu_load_tlb(mmu, cr);
615 /* update lock base */
616 if (lock.victim == lock.base)
619 omap_mmu_set_tlb_lock(mmu, &lock);
621 omap_dsp_release_mem();
622 clk_disable(mmu->clk);
625 EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
/* Thin dispatch: decode the virtual address from a CAM register set. */
627 static inline unsigned long
628 omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
630 return mmu->ops->cam_va(cr);
/*
 * omap_mmu_clear_tlb_entry - flush the locked TLB entry whose virtual
 * address equals @vadr, then shrink the lock base down to just above
 * the highest remaining valid entry.
 */
633 int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
635 struct omap_mmu_tlb_lock lock;
639 clk_enable(mmu->clk);
640 omap_dsp_request_mem();
642 omap_mmu_get_tlb_lock(mmu, &lock);
643 for (i = 0; i < lock.base; i++) {
644 struct cam_ram_regset cr;
646 /* read a TLB entry */
648 omap_mmu_read_tlb(mmu, &lock, &cr);
649 if (!omap_mmu_cam_ram_valid(mmu, &cr))
652 if (omap_mmu_cam_va(mmu, &cr) == vadr)
653 /* flush the entry */
659 /* set new lock base */
660 lock.base = lock.victim = max_valid + 1;
661 omap_mmu_set_tlb_lock(mmu, &lock);
663 omap_dsp_release_mem();
664 clk_disable(mmu->clk);
667 EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);
/*
 * omap_mmu_gflush - global TLB flush; afterwards only the preserved
 * exmap entries remain locked (lock base reset to nr_exmap_preserved).
 */
669 static void omap_mmu_gflush(struct omap_mmu *mmu)
671 struct omap_mmu_tlb_lock lock;
673 clk_enable(mmu->clk);
674 omap_dsp_request_mem();
676 omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_GFLUSH);
677 lock.base = lock.victim = mmu->nr_exmap_preserved;
678 omap_mmu_set_tlb_lock(mmu, &lock);
680 omap_dsp_release_mem();
681 clk_disable(mmu->clk);
/*
 * omap_mmu_load_pte_entry - install a mapping. Non-preserved entries on
 * table-walk-capable MMUs go into the page table; preserved entries
 * (and MMUs without pte_get_attr) are locked into the TLB instead.
 */
684 int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
685 struct omap_mmu_tlb_entry *entry)
688 if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
689 /*XXX use PG_flag for prsvd */
690 ret = omap_mmu_load_pte(mmu, entry);
695 ret = omap_mmu_load_tlb_entry(mmu, entry);
698 EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);
/*
 * omap_mmu_clear_pte_entry - remove a mapping from both the TLB and,
 * when the MMU supports table walks, from the page table.
 */
700 int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
702 int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
705 if (mmu->ops->pte_get_attr)
706 omap_mmu_clear_pte(mmu, vadr);
709 EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);
714 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
715 * In this case, the buffer for DSP is allocated in this routine,
717 * On the other hand, for example - frame buffer sharing, calls
718 * this function with padr set. It means some known address space
719 * pointed with padr is going to be shared with DSP.
/*
 * omap_mmu_exmap - map external memory into the DSP address space.
 * With padr == 0 the buffer is allocated here (EXMAP_TYPE_MEM); with
 * padr set, an existing physical region (e.g. a frame buffer) is
 * shared. The request is split greedily into 1MB / 64KB / 4KB units,
 * each unit getting an ARM-side mapping, a DSP PTE/TLB entry, and an
 * exmap_tbl entry linked to the previous unit of the same request.
 * On failure the partial mapping is torn down via omap_mmu_exunmap().
 */
721 int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
722 unsigned long padr, unsigned long size,
723 enum exmap_type type)
727 unsigned int order = 0;
730 unsigned long _dspadr = dspadr;
731 unsigned long _padr = padr;
732 void *_vadr = omap_mmu_to_virt(mmu, dspadr);
733 unsigned long _size = size;
734 struct omap_mmu_tlb_entry tlb_ent;
735 struct exmap_tbl *exmap_ent, *tmp_ent;
739 #define MINIMUM_PAGESZ SZ_4K
/* all three addresses/sizes must be at least 4KB aligned */
743 if (!is_aligned(size, MINIMUM_PAGESZ)) {
745 "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
748 if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
750 "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
753 if (!is_aligned(padr, MINIMUM_PAGESZ)) {
755 "MMU: physical address(0x%lx) is not aligned.\n",
760 /* address validity check */
761 if ((dspadr < mmu->memsize) ||
762 (dspadr >= (1 << mmu->addrspace))) {
764 "MMU: illegal address/size for %s().\n",
769 down_write(&mmu->exmap_sem);
/* reject requests that overlap an existing exmap mapping */
772 omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
773 unsigned long mapsize;
777 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
778 if ((_vadr + size > tmp_ent->vadr) &&
779 (_vadr < tmp_ent->vadr + mapsize)) {
780 printk(KERN_ERR "MMU: exmap page overlap!\n");
781 up_write(&mmu->exmap_sem);
788 /* Are there any free TLB lines? */
789 for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
790 if (!mmu->exmap_tbl[idx].valid)
793 printk(KERN_ERR "MMU: DSP TLB is full.\n");
798 exmap_ent = mmu->exmap_tbl + idx;
/* pick the largest unit the remaining size and alignment allow */
800 if ((_size >= SZ_1M) &&
801 (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
802 is_aligned(_dspadr, SZ_1M)) {
804 pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
805 } else if ((_size >= SZ_64K) &&
806 (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
807 is_aligned(_dspadr, SZ_64K)) {
809 pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
812 pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
815 order = get_order(unit);
817 /* buffer allocation */
818 if (type == EXMAP_TYPE_MEM) {
819 struct page *page, *ps, *pe;
/* prefer the pre-reserved pools for 1MB/64KB units */
821 if ((order == ORDER_1MB) && likely(mempool_1M))
822 buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
823 else if ((order == ORDER_64KB) && likely(mempool_64K))
824 buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
826 buf = (void *)__get_dma_pages(GFP_KERNEL, order);
833 /* mark the pages as reserved; this is needed for mmap */
834 ps = virt_to_page(buf);
835 pe = virt_to_page(buf + unit);
837 for (page = ps; page < pe; page++)
838 SetPageReserved(page);
844 * mapping for ARM MMU:
845 * we should not access to the allocated memory through 'buf'
846 * since this area should not be cached.
848 status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
852 /* loading DSP PTE entry */
853 INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
854 status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
856 exmap_clear_armmmu((unsigned long)_vadr, unit);
860 INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
861 exmap_ent->link.prev = prev;
863 mmu->exmap_tbl[prev].link.next = idx;
865 if ((_size -= unit) == 0) { /* normal completion */
866 up_write(&mmu->exmap_sem);
/* advance to the next unit; padr stays 0 for allocated buffers */
872 _padr = padr ? _padr + unit : 0;
877 up_write(&mmu->exmap_sem);
879 omap_mmu_free_pages((unsigned long)buf, order);
880 omap_mmu_exunmap(mmu, dspadr);
883 EXPORT_SYMBOL_GPL(omap_mmu_exmap);
/*
 * unmap_free_arm - clear the ARM-side mapping for one exmap entry and,
 * for allocated (EXMAP_TYPE_MEM) entries, free the backing pages.
 * Returns the mapping size in bytes.
 */
885 static unsigned long unmap_free_arm(struct exmap_tbl *ent)
889 /* clearing ARM MMU */
890 size = 1 << (ent->order + PAGE_SHIFT);
891 exmap_clear_armmmu((unsigned long)ent->vadr, size);
893 /* freeing allocated memory */
894 if (ent->type == EXMAP_TYPE_MEM) {
895 omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
897 "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
/*
 * omap_mmu_exunmap - undo omap_mmu_exmap() for the mapping that starts
 * at @dspadr. Refuses while the entry's usecount is non-zero, then
 * clears the DSP PTE/TLB entry, the ARM mapping, and follows the
 * exmap_tbl link chain to release every unit of the original request.
 */
905 int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
910 struct exmap_tbl *ent;
913 vadr = omap_mmu_to_virt(mmu, dspadr);
914 down_write(&mmu->exmap_sem);
915 for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
916 ent = mmu->exmap_tbl + idx;
917 if (!ent->valid || ent->prsvd)
919 if (ent->vadr == vadr)
922 up_write(&mmu->exmap_sem);
924 "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
928 if (ent->usecount > 0) {
930 "MMU: exmap reference count is not 0.\n"
931 " idx=%d, vadr=%p, order=%d, usecount=%d\n",
932 idx, ent->vadr, ent->order, ent->usecount);
933 up_write(&mmu->exmap_sem);
936 /* clearing DSP PTE entry */
937 omap_mmu_clear_pte_entry(mmu, dspadr);
939 /* clear ARM MMU and free buffer */
940 size = unmap_free_arm(ent);
943 /* we don't free PTEs */
946 flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
948 /* check if next mapping is in same group */
949 idx = ent->link.next;
951 goto up_out; /* normal completion */
952 ent = mmu->exmap_tbl + idx;
/* linked entries must be virtually contiguous with the previous one */
955 if (ent->vadr == vadr)
956 goto found_map; /* continue */
959 "MMU: illegal exmap_tbl grouping!\n"
960 "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
961 vadr, idx, ent->vadr);
962 up_write(&mmu->exmap_sem);
966 up_write(&mmu->exmap_sem);
969 EXPORT_SYMBOL_GPL(omap_mmu_exunmap);
/*
 * omap_mmu_exmap_flush - tear down every non-preserved exmap mapping:
 * global TLB flush, per-entry ARM unmap/free, then one kernel TLB
 * flush over the whole external-mapping window.
 */
971 void omap_mmu_exmap_flush(struct omap_mmu *mmu)
973 struct exmap_tbl *ent;
975 down_write(&mmu->exmap_sem);
977 /* clearing TLB entry */
978 omap_mmu_gflush(mmu);
980 omap_mmu_for_each_tlb_entry(mmu, ent)
981 if (ent->valid && !ent->prsvd)
985 if (likely(mmu->membase))
986 flush_tlb_kernel_range(mmu->membase + mmu->memsize,
987 mmu->membase + (1 << mmu->addrspace));
989 up_write(&mmu->exmap_sem);
991 EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);
/*
 * exmap_setup_preserved_mem_page - map a single preserved 4KB page at
 * @dspadr backed by @buf, recording it in exmap_tbl slot @index and
 * locking it into the DSP TLB.
 */
993 void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
994 unsigned long dspadr, int index)
998 struct omap_mmu_tlb_entry tlb_ent;
1001 virt = omap_mmu_to_virt(mmu, dspadr);
1002 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1003 INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
1004 INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
1005 omap_mmu_load_pte_entry(mmu, &tlb_ent);
1007 EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
/* Remove the ARM-side mapping of one preserved 4KB page; the DSP-side
 * entry is left for MMU shutdown to clean up. */
1009 void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
1011 void *virt = omap_mmu_to_virt(mmu, dspadr);
1013 exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
1014 /* DSP MMU is shutting down. not handled here. */
1016 EXPORT_SYMBOL_GPL(exmap_clear_mem_page);
/*
 * omap_mmu_reset - OMAP2 soft reset: set SOFTRESET in SYSCONFIG and
 * poll SYSSTATUS (bounded to 10000 iterations) for reset completion.
 */
1018 static void omap_mmu_reset(struct omap_mmu *mmu)
1020 #if defined(CONFIG_ARCH_OMAP2) /* FIXME */
1023 omap_mmu_write_reg(mmu, 0x2, OMAP_MMU_SYSCONFIG);
1025 for (i = 0; i < 10000; i++)
1026 if (likely(omap_mmu_read_reg(mmu, OMAP_MMU_SYSSTATUS) & 0x1))
/* Disable the MMU by clearing the whole CNTL register. */
1031 void omap_mmu_disable(struct omap_mmu *mmu)
1033 omap_mmu_write_reg(mmu, 0x00, OMAP_MMU_CNTL);
1035 EXPORT_SYMBOL_GPL(omap_mmu_disable);
/*
 * omap_mmu_enable - enable the MMU, optionally soft-resetting it first.
 * On OMAP2 with table-walk support, also program the TTB with the
 * physical address of the TWL mm's pgd and enable the table walker.
 */
1037 void omap_mmu_enable(struct omap_mmu *mmu, int reset)
1039 u32 val = OMAP_MMU_CNTL_MMU_EN;
1042 omap_mmu_reset(mmu);
1043 #if defined(CONFIG_ARCH_OMAP2) /* FIXME */
1044 if (mmu->ops->pte_get_attr) {
1045 omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd),
1047 val |= MMU_CNTL_TWLENABLE;
1050 val |= OMAP_MMU_CNTL_RESET_SW;
1052 omap_mmu_write_reg(mmu, val, OMAP_MMU_CNTL);
1054 EXPORT_SYMBOL_GPL(omap_mmu_enable);
/* MMU fault IRQ handler: delegate to the variant-specific hook. */
1056 static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
1058 struct omap_mmu *mmu = dev_id;
1060 if (likely(mmu->ops->interrupt))
1061 mmu->ops->interrupt(mmu);
/*
 * omap_mmu_init - one-time hardware bring-up: register the fault IRQ,
 * reset and enable the MMU, zero the TLB lock, and run the optional
 * variant startup hook. Clock and DSP memory are held across the call.
 */
1066 static int omap_mmu_init(struct omap_mmu *mmu)
1068 struct omap_mmu_tlb_lock tlb_lock;
1071 clk_enable(mmu->clk);
1072 omap_dsp_request_mem();
1073 down_write(&mmu->exmap_sem);
1075 ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
1079 "failed to register MMU interrupt: %d\n", ret);
1083 omap_mmu_disable(mmu); /* clear all */
1085 omap_mmu_enable(mmu, 1);
1087 memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
1088 omap_mmu_set_tlb_lock(mmu, &tlb_lock);
1090 if (unlikely(mmu->ops->startup))
1091 ret = mmu->ops->startup(mmu);
1093 up_write(&mmu->exmap_sem);
1094 omap_dsp_release_mem();
1095 clk_disable(mmu->clk);
/*
 * omap_mmu_shutdown - reverse of omap_mmu_init(): free the IRQ, run
 * the variant shutdown hook, flush all exmap mappings, disable MMU.
 */
1100 static void omap_mmu_shutdown(struct omap_mmu *mmu)
1102 free_irq(mmu->irq, mmu);
1104 if (unlikely(mmu->ops->shutdown))
1105 mmu->ops->shutdown(mmu);
1107 omap_mmu_exmap_flush(mmu);
1108 omap_mmu_disable(mmu); /* clear all */
1112 * omap_mmu_mem_enable() / disable()
/*
 * omap_mmu_mem_enable - make DSP memory accessible; variant hook takes
 * precedence, otherwise just take exmap_sem for reading.
 */
1114 int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
1116 if (unlikely(mmu->ops->mem_enable))
1117 return mmu->ops->mem_enable(mmu, addr);
1119 down_read(&mmu->exmap_sem);
1122 EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);
/* Counterpart of omap_mmu_mem_enable(): variant hook or release sem. */
1124 void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
1126 if (unlikely(mmu->ops->mem_disable)) {
1127 mmu->ops->mem_disable(mmu, addr);
1131 up_read(&mmu->exmap_sem);
1133 EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
1136 * dsp_mem file operations
/*
 * intmem_read - copy from internal DSP memory to user space, clamping
 * the count to the end of the internal memory region; the memory clock
 * is enabled around the copy.
 */
1138 static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
1141 unsigned long p = *ppos;
1142 void *vadr = omap_mmu_to_virt(mmu, p);
1143 ssize_t size = mmu->memsize;
1148 clk_enable(mmu->memclk);
1150 if (count > size - p)
1152 if (copy_to_user(buf, vadr, read)) {
1158 clk_disable(mmu->memclk);
/*
 * exmem_read - copy from exmap-mapped external memory to user space;
 * the range must be fully covered by valid exmap mappings, and count
 * is clamped to the end of the MMU address space.
 */
1162 static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
1165 unsigned long p = *ppos;
1166 void *vadr = omap_mmu_to_virt(mmu, p);
1168 if (!exmap_valid(mmu, vadr, count)) {
1170 "MMU: DSP address %08lx / size %08x "
1171 "is not valid!\n", p, count);
1174 if (count > (1 << mmu->addrspace) - p)
1175 count = (1 << mmu->addrspace) - p;
1176 if (copy_to_user(buf, vadr, count))
/*
 * omap_mmu_mem_read - sysfs bin-attribute read: route to intmem_read()
 * for offsets inside internal memory, exmem_read() otherwise, with
 * memory access enabled around the operation.
 */
1183 static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
1184 loff_t offset, size_t count)
1186 struct device *dev = to_dev(kobj);
1187 struct omap_mmu *mmu = dev_get_drvdata(dev);
1188 unsigned long p = (unsigned long)offset;
1189 void *vadr = omap_mmu_to_virt(mmu, p);
1192 if (omap_mmu_mem_enable(mmu, vadr) < 0)
1195 if (p < mmu->memsize)
1196 ret = intmem_read(mmu, buf, count, &offset);
1198 ret = exmem_read(mmu, buf, count, &offset);
1200 omap_mmu_mem_disable(mmu, vadr);
/*
 * intmem_write - copy from user space into internal DSP memory,
 * clamping to the internal region; memory clock enabled for the copy.
 */
1205 static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
1208 unsigned long p = *ppos;
1209 void *vadr = omap_mmu_to_virt(mmu, p);
1210 ssize_t size = mmu->memsize;
1215 clk_enable(mmu->memclk);
1217 if (count > size - p)
1219 if (copy_from_user(vadr, buf, written)) {
1225 clk_disable(mmu->memclk);
/*
 * exmem_write - copy from user space into exmap-mapped external
 * memory; mirrors exmem_read()'s validity and range checks.
 */
1229 static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
1232 unsigned long p = *ppos;
1233 void *vadr = omap_mmu_to_virt(mmu, p);
1235 if (!exmap_valid(mmu, vadr, count)) {
1237 "MMU: DSP address %08lx / size %08x "
1238 "is not valid!\n", p, count);
1241 if (count > (1 << mmu->addrspace) - p)
1242 count = (1 << mmu->addrspace) - p;
1243 if (copy_from_user(vadr, buf, count))
/*
 * omap_mmu_mem_write - sysfs bin-attribute write: route to
 * intmem_write() or exmem_write() depending on the offset.
 */
1250 static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
1251 loff_t offset, size_t count)
1253 struct device *dev = to_dev(kobj);
1254 struct omap_mmu *mmu = dev_get_drvdata(dev);
1255 unsigned long p = (unsigned long)offset;
1256 void *vadr = omap_mmu_to_virt(mmu, p);
1259 if (omap_mmu_mem_enable(mmu, vadr) < 0)
1262 if (p < mmu->memsize)
1263 ret = intmem_write(mmu, buf, count, &offset);
1265 ret = exmem_write(mmu, buf, count, &offset);
1267 omap_mmu_mem_disable(mmu, vadr);
/* Binary sysfs attribute exposing DSP memory (owner-rw, group-r). */
1272 static struct bin_attribute dev_attr_mem = {
1275 .owner = THIS_MODULE,
1276 .mode = S_IRUSR | S_IWUSR | S_IRGRP,
1279 .read = omap_mmu_mem_read,
1280 .write = omap_mmu_mem_write,
1283 /* To be obsolete for backward compatibility */
/* Backward-compatibility wrappers around the sysfs read/write paths. */
1284 ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
1285 loff_t offset, size_t count)
1287 return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
1289 EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);
1291 ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
1292 loff_t offset, size_t count)
1294 return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
1296 EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);
/*
 * omap_mmu_show - 'mmu' sysfs attribute: dump TLB state via the
 * variant show hook, restoring the victim pointer afterwards since
 * reading the TLB moves it.
 */
1301 static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
1304 struct omap_mmu *mmu = dev_get_drvdata(dev);
1305 struct omap_mmu_tlb_lock tlb_lock;
1308 clk_enable(mmu->clk);
1309 omap_dsp_request_mem();
1311 down_read(&mmu->exmap_sem);
1313 omap_mmu_get_tlb_lock(mmu, &tlb_lock);
1315 if (likely(mmu->ops->show))
1316 ret = mmu->ops->show(mmu, buf, &tlb_lock);
1318 /* restore victim entry */
1319 omap_mmu_set_tlb_lock(mmu, &tlb_lock);
1321 up_read(&mmu->exmap_sem);
1322 omap_dsp_release_mem();
1323 clk_disable(mmu->clk);
1328 static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);
/*
 * exmap_show - 'exmap' sysfs attribute: list each exmap group (chain of
 * linked entries) with its DSP address and total size, then one line
 * per constituent buffer.
 */
1330 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
1333 struct omap_mmu *mmu = dev_get_drvdata(dev);
1334 struct exmap_tbl *ent;
1338 down_read(&mmu->exmap_sem);
1339 len = sprintf(buf, "  dspadr     size         buf     size uc\n");
1340 /* 0x300000 0x123000  0xc0171000 0x100000  0*/
1342 omap_mmu_for_each_tlb_entry(mmu, ent) {
1345 enum exmap_type type;
1348 /* find a top of link */
1349 if (!ent->valid || (ent->link.prev >= 0))
/* sum the sizes of every entry chained to this head */
1357 ent = mmu->exmap_tbl + idx;
1358 size += PAGE_SIZE << ent->order;
1359 } while ((idx = ent->link.next) >= 0);
1361 len += sprintf(buf + len, "0x%06lx %#8lx",
1362 virt_to_omap_mmu(mmu, vadr), size);
1364 if (type == EXMAP_TYPE_FB) {
1365 len += sprintf(buf + len, "    framebuf\n");
1367 len += sprintf(buf + len, "\n");
/* second pass: one detail line per buffer in the chain */
1370 ent = mmu->exmap_tbl + idx;
1371 len += sprintf(buf + len,
1372 /* 0xc0171000 0x100000  0*/
1373 "%19s0x%8p %#8lx %2d\n",
1375 PAGE_SIZE << ent->order,
1377 } while ((idx = ent->link.next) >= 0);
1383 up_read(&mmu->exmap_sem);
/*
 * exmap_store - 'exmap' sysfs attribute write: "<base> <len>" adds an
 * anonymous mapping at DSP address base; len == 0 removes the mapping
 * at base.
 */
1387 static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
1391 struct omap_mmu *mmu = dev_get_drvdata(dev);
1392 unsigned long base = 0, len = 0;
1395 sscanf(buf, "%lx %lx", &base, &len);
1401 /* Add the mapping */
1402 ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
1406 /* Remove the mapping */
1407 ret = omap_mmu_exunmap(mmu, base);
1415 static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
/*
 * mempool_show - class attribute: report reserved/free counts of the
 * 1MB and 64KB pools plus the total bytes reserved.
 */
1417 static ssize_t mempool_show(struct class *class, char *buf)
1419 int min_nr_1M = 0, curr_nr_1M = 0;
1420 int min_nr_64K = 0, curr_nr_64K = 0;
1423 if (likely(mempool_1M)) {
1424 min_nr_1M = mempool_1M->min_nr;
1425 curr_nr_1M = mempool_1M->curr_nr;
1426 total += min_nr_1M * SZ_1M;
1428 if (likely(mempool_64K)) {
1429 min_nr_64K = mempool_64K->min_nr;
1430 curr_nr_64K = mempool_64K->curr_nr;
1431 total += min_nr_64K * SZ_64K;
1436 "1M buffer: %d (%d free)\n"
1437 "64K buffer: %d (%d free)\n",
1438 total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
1442 static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
/* Empty release: omap_mmu devices are embedded, nothing to free here. */
1444 static void omap_mmu_class_dev_release(struct device *dev)
1448 static struct class omap_mmu_class = {
1450 .dev_release = omap_mmu_class_dev_release,
/*
 * omap_mmu_register - register one MMU instance: allocate the exmap
 * table, create the TWL mm (when table walks are supported), register
 * the device, bring up the hardware, and create the sysfs attributes.
 * Unwinds in reverse order on any failure.
 */
1453 int omap_mmu_register(struct omap_mmu *mmu)
1457 mmu->dev.class = &omap_mmu_class;
1458 strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
1459 dev_set_drvdata(&mmu->dev, mmu);
1461 mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
1463 if (!mmu->exmap_tbl)
1466 if (mmu->ops->pte_get_attr) {
1467 struct mm_struct *mm = mm_alloc();
1475 ret = device_register(&mmu->dev);
1477 goto err_dev_register;
1479 init_rwsem(&mmu->exmap_sem);
1481 ret = omap_mmu_init(mmu);
1485 ret = device_create_file(&mmu->dev, &dev_attr_mmu);
1487 goto err_dev_create_mmu;
1488 ret = device_create_file(&mmu->dev, &dev_attr_exmap);
1490 goto err_dev_create_exmap;
/* expose the 'mem' binary attribute only when internal memory exists */
1492 if (likely(mmu->membase)) {
1493 dev_attr_mem.size = mmu->memsize;
1494 ret = device_create_bin_file(&mmu->dev,
1497 goto err_bin_create_mem;
/* error unwinding, innermost failure first */
1503 device_remove_file(&mmu->dev, &dev_attr_exmap);
1504 err_dev_create_exmap:
1505 device_remove_file(&mmu->dev, &dev_attr_mmu);
1507 omap_mmu_shutdown(mmu);
1509 device_unregister(&mmu->dev);
1514 kfree(mmu->exmap_tbl);
1515 mmu->exmap_tbl = NULL;
1518 EXPORT_SYMBOL_GPL(omap_mmu_register);
/*
 * omap_mmu_unregister - reverse of omap_mmu_register(): shut down the
 * hardware, release reserved pools, remove sysfs attributes, free the
 * exmap table and TWL mm, then unregister the device.
 */
1520 void omap_mmu_unregister(struct omap_mmu *mmu)
1522 omap_mmu_shutdown(mmu);
1523 omap_mmu_kmem_release();
1525 device_remove_file(&mmu->dev, &dev_attr_mmu);
1526 device_remove_file(&mmu->dev, &dev_attr_exmap);
1528 if (likely(mmu->membase))
1529 device_remove_bin_file(&mmu->dev,
1532 kfree(mmu->exmap_tbl);
1533 mmu->exmap_tbl = NULL;
1535 if (mmu->ops->pte_get_attr) {
1537 __mmdrop(mmu->twl_mm);
1542 device_unregister(&mmu->dev);
1544 EXPORT_SYMBOL_GPL(omap_mmu_unregister);
/* Register the omap-mmu class and its 'mempool' attribute at boot. */
1546 static int __init omap_mmu_class_init(void)
1548 int ret = class_register(&omap_mmu_class);
1550 ret = class_create_file(&omap_mmu_class, &class_attr_mempool);
/* Module teardown: remove the attribute and unregister the class. */
1555 static void __exit omap_mmu_class_exit(void)
1557 class_remove_file(&omap_mmu_class, &class_attr_mempool);
1558 class_unregister(&omap_mmu_class);
1561 subsys_initcall(omap_mmu_class_init);
1562 module_exit(omap_mmu_class_exit);
1564 MODULE_LICENSE("GPL");