/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>
#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif
/*
 * On OMAP2 MMU_LOCK_xxx_MASK only applies to the IVA and DSP, the camera
 * MMU has base and victim implemented in different bits in the LOCK
 * register (shifts are still the same), all of the other registers are
 * the same on all of the MMUs..
 */
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_VICTIM_SHIFT	4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)

#define is_aligned(adr, align)	(!((adr) & ((align) - 1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE	(1 << 3)
#define MMU_CNTL_TWLENABLE	(1 << 2)
#define MMU_CNTL_MMUENABLE	(1 << 1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;
#define omap_mmu_for_each_tlb_entry(mmu, entry)				\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),		\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);		\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)
static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}
/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request consecutive 1MB or 64kB,
 * but it will be difficult after memory pages are fragmented.
 * So, user can reserve such memory blocks in the early phase
 * through kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}
int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is larger than DSP memory space "
		       "size (0x%x).\n", size, (1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size > SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 20);
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 16);
	}

	return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
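
/*
 * Example (illustrative sketch, not part of the driver): a DSP driver's
 * early init path could reserve 2MB + 128KB before system memory gets
 * fragmented.  'dsp_mmu' is a hypothetical struct omap_mmu instance:
 *
 *	if (omap_mmu_kmem_reserve(&dsp_mmu, SZ_2M + SZ_128K) < 0)
 *		printk(KERN_WARNING "MMU: exmap kmem reserve failed\n");
 */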
void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);
static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}
int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	printk(KERN_DEBUG
	       "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
	       virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);
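
/*
 * Example (sketch): mapping 64KB of a known physical buffer into the
 * kernel side of the DSP space and tearing it down again.  The two
 * addresses below are made up for illustration only:
 *
 *	if (exmap_set_armmmu(0xe0000000, 0x10000000, SZ_64K) == 0)
 *		exmap_clear_armmmu(0xe0000000, SZ_64K);
 */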
void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	printk(KERN_DEBUG
	       "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
	       virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);
int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers whole address. */
				return 1;
			} else {
				/*
				 * this map covers partially.
				 * check rest portion.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);
/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);
void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
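
/*
 * Example (sketch): a driver's mmap() handler would bump the use count
 * before exporting a mapped area to user space, and drop it again from
 * the corresponding vm_operations close hook:
 *
 *	omap_mmu_exmap_use(mmu, vadr, len);	(in the mmap handler)
 *	...
 *	omap_mmu_exmap_unuse(mmu, vadr, len);	(in the vma close hook)
 */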
/*
 * omap_mmu_virt_to_phys()
 * returns physical address, and sets len to valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
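
/*
 * Example (sketch): because an exmap buffer may be built from several
 * discontiguous chunks, a caller translating a whole region walks it
 * chunk by chunk, using the valid length that each call reports back.
 * 'total', 'vadr' and 'mmu' are assumed to be set up by the caller:
 *
 *	size_t left = total, chunk;
 *	while (left > 0) {
 *		unsigned long pa = omap_mmu_virt_to_phys(mmu, vadr, &chunk);
 *		if (!pa)
 *			break;			(no valid mapping here)
 *		if (chunk > left)
 *			chunk = left;
 *		vadr += chunk;
 *		left -= chunk;
 *	}
 */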
static void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
		       unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (virt & (1 << SECTION_SHIFT))
		pmdp++;
	*pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
	flush_pmd_entry(pmdp);
}
static void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
			    unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}
static int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
		    unsigned long phys, pgprot_t prot)
{
	pte_t *ptep;
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (!(prot & PTE_TYPE_MASK))
		prot |= PTE_TYPE_SMALL;

	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(mm, virt);
		if (ptep == NULL)
			return -ENOMEM;
		pmd_populate_kernel(mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, virt);
	ptep -= PTRS_PER_PTE;
	*ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
	flush_pmd_entry((pmd_t *)ptep);

	return 0;
}
static int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
			 unsigned long phys, pgprot_t prot)
{
	int i, ret;

	for (i = 0; i < 16; i += 1) {
		ret = omap_mmu_alloc_page(mm, virt, phys,
					  prot | PTE_TYPE_LARGE);
		if (ret)
			return -ENOMEM; /* only 1st time */
		virt += PAGE_SIZE;
	}

	return 0;
}
static int omap_mmu_load_pte(struct omap_mmu *mmu,
			     struct omap_mmu_tlb_entry *e)
{
	int ret = 0;
	struct mm_struct *mm = mmu->twl_mm;
	const unsigned long va = e->va;
	const unsigned long pa = e->pa;
	const pgprot_t prot = mmu->ops->pte_get_attr(e);

	spin_lock(&mm->page_table_lock);

	switch (e->pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_16MB:
		omap_mmu_alloc_supersection(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		omap_mmu_alloc_section(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		ret = omap_mmu_alloc_page(mm, va, pa, prot);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}
static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
	pte_t *ptep, *end;
	pmd_t *pmdp;
	struct mm_struct *mm = mmu->twl_mm;

	spin_lock(&mm->page_table_lock);

	pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (pmd_none(*pmdp))
		goto out;
	if (!pmd_table(*pmdp))
		goto invalidate_pmd;

	ptep = pte_offset_kernel(pmdp, virt);
	pte_clear(mm, virt, ptep);
	flush_pmd_entry((pmd_t *)ptep);

	/* zap pte */
	end = pmd_page_vaddr(*pmdp);
	ptep = end - PTRS_PER_PTE;
	while (ptep < end) {
		if (!pte_none(*ptep))
			goto out;
		ptep++;
	}
	pte_free_kernel(pmd_page_vaddr(*pmdp));

invalidate_pmd:
	pmd_clear(pmdp);
	flush_pmd_entry(pmdp);
out:
	spin_unlock(&mm->page_table_lock);
}
static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}
static void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}
static void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}
static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}
void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);
void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}
int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			break;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

	/* The last entry cannot be locked? */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		printk(KERN_ERR "MMU: TLB is full.\n");
		ret = -EBUSY;
		goto out;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr)) {
		ret = PTR_ERR(cr);
		goto out;
	}

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);
out:
	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
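
/*
 * Example (sketch): loading a single 4KB entry by hand.  INIT_TLB_ENTRY
 * comes from the arch mmu.h header included above; the DSP address and
 * physical address below are illustrative only:
 *
 *	struct omap_mmu_tlb_entry ent;
 *
 *	INIT_TLB_ENTRY(&ent, 0x010000, 0x10000000,
 *		       OMAP_MMU_CAM_PAGESIZE_4KB);
 *	if (omap_mmu_load_tlb_entry(mmu, &ent) < 0)
 *		printk(KERN_ERR "MMU: TLB load failed\n");
 */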
static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}
int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);
static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
}
int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	int ret;

	if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
		/*XXX use PG_flag for prsvd */
		ret = omap_mmu_load_pte(mmu, entry);
		if (ret)
			return ret;
	}

	ret = omap_mmu_load_tlb_entry(mmu, entry);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);
int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
	if (ret)
		return ret;

	if (mmu->ops->pte_get_attr)
		omap_mmu_clear_pte(mmu, vadr);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);
/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
 * In this case, the buffer for DSP is allocated in this routine,
 * then it is mapped.
 * On the other hand, for example - frame buffer sharing, calls
 * this function with padr set. It means some known address space
 * pointed with padr is going to be shared with DSP.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _dspadr = dspadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, dspadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
		return -EINVAL;
	}
	if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: physical address(0x%lx) is not aligned.\n",
		       padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((dspadr < mmu->memsize) ||
	    (dspadr >= (1 << mmu->addrspace))) {
		printk(KERN_ERR
		       "MMU: illegal address/size for %s().\n",
		       __FUNCTION__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			printk(KERN_ERR "MMU: exmap page overlap!\n");
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines?  */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	printk(KERN_ERR "MMU: DSP TLB is full.\n");
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_dspadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_dspadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
		if (buf == NULL) {
			status = -ENOMEM;
			goto fail;
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access to the allocated memory through 'buf'
	 * since this area should not be cached.
	 */
	status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading DSP PTE entry */
	INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
	status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu((unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_dspadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, dspadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
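
/*
 * Example (sketch): allocating and mapping 1MB of kernel memory at DSP
 * address 0x200000 (an illustrative address), then unmapping it again.
 * With padr == 0 and EXMAP_TYPE_MEM, the buffer is allocated right here,
 * preferably from the 1M mempool when one was set up through
 * omap_mmu_kmem_reserve():
 *
 *	if (omap_mmu_exmap(mmu, 0x200000, 0, SZ_1M, EXMAP_TYPE_MEM) >= 0)
 *		omap_mmu_exunmap(mmu, 0x200000);
 */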
static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu((unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		printk(KERN_DEBUG
		       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
		       size, ent->buf);
	}

	ent->valid = 0;
	return size;
}
int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, dspadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	printk(KERN_WARNING
	       "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "MMU: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing DSP PTE entry */
	omap_mmu_clear_pte_entry(mmu, dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	total += size;

	/* we don't free PTEs */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	dspadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	printk(KERN_ERR
	       "MMU: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);
void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entry */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);
void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long dspadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, dspadr);
	exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
	omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *virt = omap_mmu_to_virt(mmu, dspadr);

	exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
	/* DSP MMU is shutting down. not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);
static void omap_mmu_reset(struct omap_mmu *mmu)
{
	int i;

	omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
			break;
}
void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	u32 val = MMU_CNTL_MMUENABLE;
	u32 pa = (u32)virt_to_phys(mmu->twl_mm->pgd);

	if (likely(reset))
		omap_mmu_reset(mmu);

	if (mmu->ops->pte_get_attr) {
		omap_mmu_write_reg(mmu, pa, MMU_TTB);
		val |= MMU_CNTL_TWLENABLE;
	}

	omap_mmu_write_reg(mmu, val, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);
static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}
static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();
	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		printk(KERN_ERR
		       "failed to register MMU interrupt: %d\n", ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */

	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);
fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}
static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}
/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
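
/*
 * Example (sketch): direct access to DSP-visible memory brackets the
 * access with the enable/disable pair, which takes exmap_sem unless the
 * MMU provides its own hooks.  'data', 'dspadr' and 'len' are assumed
 * to be set up by the caller:
 *
 *	void *vadr = omap_mmu_to_virt(mmu, dspadr);
 *
 *	if (omap_mmu_mem_enable(mmu, vadr) >= 0) {
 *		memcpy(data, vadr, len);
 *		omap_mmu_mem_disable(mmu, vadr);
 *	}
 */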
/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);

	return read;
}
static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}
static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
				 loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}
static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);

	return written;
}
static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}
static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
				  loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}
static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};
/* To be obsolete for backward compatibility */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
			    loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
			     loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = -ENOSYS;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);
static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	ssize_t len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  dspadr     size         buf     size uc\n");
		    /* 0x300000 0x123000  0xc0171000 0x100000  0*/

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx, top;

		/* find a top of link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		top = ent - mmu->exmap_tbl;

		size = 0;
		idx = top;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
		} else {
			len += sprintf(buf + len, "\n");
			idx = top;
			do {
				ent = mmu->exmap_tbl + idx;
				len += sprintf(buf + len,
					       /* 0xc0171000 0x100000  0*/
					       "%19s0x%8p %#8lx %2d\n",
					       "", ent->buf,
					       PAGE_SIZE << ent->order,
					       ent->usecount);
			} while ((idx = ent->link.next) >= 0);
		}
	}

	up_read(&mmu->exmap_sem);
	return len;
}
static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
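
/*
 * From user space (sketch): writing "<dspadr> <size>" in hex to the sysfs
 * file created above adds a mapping, and writing "<dspadr> 0" removes it
 * (the exact path depends on the registered class and device name), e.g.:
 *
 *	echo "200000 100000" > /sys/class/mmu/<name>/exmap
 *	echo "200000 0"      > /sys/class/mmu/<name>/exmap
 */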
static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M  = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K  = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "total: %d bytes\n"
		       "1M buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
	.name		= "mmu",
	.dev_release	= omap_mmu_class_dev_release,
};
int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev.class = &omap_mmu_class;
	strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
	dev_set_drvdata(&mmu->dev, mmu);

	mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		return -ENOMEM;

	if (mmu->ops->pte_get_attr) {
		struct mm_struct *mm = mm_alloc();
		if (!mm) {
			ret = -ENOMEM;
			goto err_mm_alloc;
		}
		mmu->twl_mm = mm;
	}

	ret = device_register(&mmu->dev);
	if (unlikely(ret))
		goto err_dev_register;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_read_reg(mmu, MMU_REVISION);
	printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
	       mmu->name, (ret >> 4) & 0xf, ret & 0xf);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(&mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(&mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(&mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	device_unregister(&mmu->dev);
err_dev_register:
	if (mmu->ops->pte_get_attr)
		__mmdrop(mmu->twl_mm);
err_mm_alloc:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);
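
/*
 * Example (sketch): a machine layer would fill in a struct omap_mmu and
 * register it.  The fields shown are the ones this file relies on; the
 * values and 'dsp_mmu_ops' are hypothetical, not defined here:
 *
 *	static struct omap_mmu dsp_mmu = {
 *		.name		= "dsp",
 *		.nr_tlb_entries	= 32,
 *		.addrspace	= 24,
 *		.ops		= &dsp_mmu_ops,
 *	};
 *
 *	ret = omap_mmu_register(&dsp_mmu);
 */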
void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(&mmu->dev, &dev_attr_mmu);
	device_remove_file(&mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(&mmu->dev,
				       &dev_attr_mem);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	if (mmu->ops->pte_get_attr) {
		if (mmu->twl_mm) {
			__mmdrop(mmu->twl_mm);
			mmu->twl_mm = NULL;
		}
	}

	device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);
static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");