/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>
#include <asm/arch/dsp_common.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU has base and victim implemented in different bits of the LOCK
 * register (the shifts are still the same). All of the other registers
 * are the same on all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT		10
#define MMU_LOCK_VICTIM_SHIFT		4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)

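/*
 * Illustration (editor's note, not from the original source): the LOCK
 * register packs the "base" and "victim" indices into one word using the
 * shifts above. For example, base = 3 and victim = 1 encode as
 *
 *	(3 << MMU_LOCK_BASE_SHIFT) | (1 << MMU_LOCK_VICTIM_SHIFT) == 0xc10
 *
 * and the camera MMU simply narrows both fields to 3 bits (the 0x7 masks).
 */
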
#define is_aligned(adr, align)	(!((adr) & ((align) - 1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE	(1 << 3)
#define MMU_CNTL_TWLENABLE	(1 << 2)
#define MMU_CNTL_MMUENABLE	(1 << 1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)			\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),	\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);	\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)

static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request contiguous 1MB or 64kB blocks, which can be hard
 * to satisfy once memory pages have become fragmented. To avoid this, the
 * user can reserve such memory blocks in the early boot phase through
 * kmem_reserve().
 */

static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		dev_err(&mmu->dev,
			"MMU %s: size(0x%lx) is not multiple of 64KB.\n",
			mmu->name, size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		dev_err(&mmu->dev,
			"MMU %s: size(0x%lx) is larger than external device"
			" memory space size (0x%x).\n", mmu->name, size,
			(1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size >= SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		size &= (SZ_1M - 1);	/* keep only the sub-1MB remainder */
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		size &= (SZ_64K - 1);	/* keep only the sub-64KB remainder */
	}

	/* anything left over is smaller than 64KB and is not reserved */
	return len - size;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);

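/*
 * Usage sketch (illustrative, not part of the original file): a board
 * init hook that wants two contiguous 1MB blocks and one 64KB block
 * available for later exmap() calls could reserve them up front with
 *
 *	omap_mmu_kmem_reserve(mmu, SZ_1M * 2 + SZ_64K);
 *
 * which creates (or grows) mempool_1M by two elements and mempool_64K by
 * one, and returns the number of bytes actually reserved.
 */
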
void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(struct omap_mmu *mmu, unsigned long virt,
		     unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	dev_dbg(&mmu->dev,
		"MMU %s: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
		mmu->name, virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(struct omap_mmu *mmu, unsigned long virt,
			unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	dev_dbg(&mmu->dev,
		"MMU %s: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
		mmu->name, virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers the whole range. */
				return 1;
			} else {
				/*
				 * this map covers the range partially;
				 * check the remaining portion.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);

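/*
 * Worked example (editor's illustration): with a 64KB entry mapped at
 * mapadr, a query for vadr = mapadr + 0xf000, len = 0x2000 falls inside
 * that entry but overruns its end. The covered 0x1000 bytes are trimmed
 * off and the remaining 0x1000 bytes are re-checked from "start" against
 * the entry covering mapadr + 0x10000; the whole range is valid only if
 * every such piece lands inside some valid entry.
 */
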
/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);

/*
 * omap_mmu_virt_to_phys()
 * returns the physical address, and sets len to the valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	/* EXRAM */
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);

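/*
 * Usage sketch (illustrative): because a logically contiguous exmap
 * region may be backed by several separately allocated buffers, a caller
 * translating a large range walks it one segment at a time:
 *
 *	size_t len;
 *	unsigned long pa;
 *
 *	while (total > 0) {
 *		pa = omap_mmu_virt_to_phys(mmu, vadr, &len);
 *		if (pa == 0)
 *			break;		// no valid mapping here
 *		if (len > total)
 *			len = total;
 *		// ... operate on [pa, pa + len) ...
 *		vadr += len; total -= len;
 *	}
 */
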
static void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
		       unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (virt & (1 << SECTION_SHIFT))
		pmdp++;
	*pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
	flush_pmd_entry(pmdp);
}

static void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
			    unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}

static int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
		    unsigned long phys, pgprot_t prot)
{
	pte_t *ptep;
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (!(prot & PTE_TYPE_MASK))
		prot |= PTE_TYPE_SMALL;

	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(mm, virt);
		if (ptep == NULL)
			return -ENOMEM;
		pmd_populate_kernel(mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, virt);
	/* step back from the Linux PTE to its hardware counterpart */
	ptep -= PTRS_PER_PTE;
	*ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
	flush_pmd_entry((pmd_t *)ptep);

	return 0;
}

static int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
			 unsigned long phys, pgprot_t prot)
{
	int i, ret;

	for (i = 0; i < 16; i += 1) {
		ret = omap_mmu_alloc_page(mm, virt, phys,
					  prot | PTE_TYPE_LARGE);
		if (ret)
			return -ENOMEM; /* only 1st time */
		virt += PAGE_SIZE;
		phys += PAGE_SIZE;
	}

	return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
			     struct omap_mmu_tlb_entry *e)
{
	int ret = 0;
	struct mm_struct *mm = mmu->twl_mm;
	const unsigned long va = e->va;
	const unsigned long pa = e->pa;
	const pgprot_t prot = mmu->ops->pte_get_attr(e);

	spin_lock(&mm->page_table_lock);

	switch (e->pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_16MB:
		omap_mmu_alloc_supersection(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		omap_mmu_alloc_section(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		ret = omap_mmu_alloc_page(mm, va, pa, prot);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
	pte_t *ptep, *end;
	pmd_t *pmdp;
	struct mm_struct *mm = mmu->twl_mm;

	spin_lock(&mm->page_table_lock);

	pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (pmd_none(*pmdp))
		goto out;
	if (!pmd_table(*pmdp))
		goto invalidate_pmd;

	ptep = pte_offset_kernel(pmdp, virt);
	pte_clear(mm, virt, ptep);
	flush_pmd_entry((pmd_t *)ptep);

	/* free the PTE table once all of its entries have been cleared */
	end = pmd_page_vaddr(*pmdp);
	ptep = end - PTRS_PER_PTE;
	while (ptep < end) {
		if (!pte_none(*ptep))
			goto out;
		ptep++;
	}
	pte_free_kernel(pmd_page_vaddr(*pmdp));

invalidate_pmd:
	pmd_clear(pmdp);
	flush_pmd_entry(pmdp);
out:
	spin_unlock(&mm->page_table_lock);
}

static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(mmu, entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}

static void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, OMAP_MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT),
			   OMAP_MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set the victim entry to read from */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}

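/*
 * Editor's note on the sequence above: ops->load_tlb() fills the CAM/RAM
 * registers with the new translation, FLUSH_ENTRY then invalidates any
 * stale TLB line matching that CAM, and LD_TLB latches the register
 * contents into the victim line selected earlier via
 * omap_mmu_set_tlb_lock().
 */
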
int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			break;	/* found a free line to reuse */
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

	/* The last entry cannot be locked? */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		dev_err(&mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
		ret = -EBUSY;
		goto out;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr)) {
		ret = PTR_ERR(cr);
		goto out;
	}

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);
out:
	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);

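/*
 * Locking scheme in brief (editor's summary of the code above): TLB
 * lines below lock.base are "locked" and survive hardware table-walk
 * replacement. The loop scans the locked region for an invalid line to
 * reuse as the victim; if none is free, the victim lands at lock.base
 * and the base is bumped after the load so the fresh entry becomes
 * locked as well.
 */
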
static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = -1;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	int ret;

	/* XXX use PG_flag for prsvd */
	ret = omap_mmu_load_pte(mmu, entry);
	if (ret)
		return ret;

	ret = omap_mmu_load_tlb_entry(mmu, entry);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
	if (ret)
		return ret;

	omap_mmu_clear_pte(mmu, vadr);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
 * In this case, the buffer for the external device is allocated in this
 * routine, then it is mapped.
 * On the other hand, some cases - frame buffer sharing, for example -
 * call this function with padr set. That means a known address space
 * pointed to by padr is going to be shared with the external device.
 */

int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long devadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _devadr = devadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, devadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/* alignment check */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		dev_err(&mmu->dev,
			"MMU %s: size(0x%lx) is not multiple of 4KB.\n",
			mmu->name, size);
		return -EINVAL;
	}
	if (!is_aligned(devadr, MINIMUM_PAGESZ)) {
		dev_err(&mmu->dev,
			"MMU %s: external device address(0x%lx) is not"
			" aligned.\n", mmu->name, devadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		dev_err(&mmu->dev,
			"MMU %s: physical address(0x%lx) is not aligned.\n",
			mmu->name, padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((devadr < mmu->memsize) ||
	    (devadr >= (1 << mmu->addrspace))) {
		dev_err(&mmu->dev,
			"MMU %s: illegal address/size for %s().\n",
			mmu->name, __FUNCTION__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			dev_err(&mmu->dev, "MMU %s: exmap page overlap!\n",
				mmu->name);
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines? */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	dev_err(&mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_devadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_devadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
		if (buf == NULL) {
			status = -ENOMEM;
			goto fail;
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access the allocated memory through 'buf',
	 * since this area should not be cached.
	 */
	status = exmap_set_armmmu(mmu, (unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading external device PTE entry */
	INIT_TLB_ENTRY(&tlb_ent, _devadr, _padr, pgsz);
	status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu(mmu, (unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_devadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, devadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);

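/*
 * Usage sketch (illustrative, values invented): mapping a 192KB
 * anonymous buffer at external device address 0x80000,
 *
 *	omap_mmu_exmap(mmu, 0x80000, 0, 0x30000, EXMAP_TYPE_MEM);
 *
 * walks the loop above three times, allocating three 64KB units and
 * chaining their exmap_tbl entries through link.prev/link.next so that
 * omap_mmu_exunmap(mmu, 0x80000) can later tear the whole group down in
 * one call.
 */
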
static unsigned long unmap_free_arm(struct omap_mmu *mmu,
				    struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu(mmu, (unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		dev_dbg(&mmu->dev, "MMU %s: freeing 0x%lx bytes @ adr 0x%8p\n",
			mmu->name, size, ent->buf);
	}

	ent->valid = 0;
	return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long devadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, devadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	dev_warn(&mmu->dev, "MMU %s: address %06lx not found in exmap_tbl.\n",
		 mmu->name, devadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		dev_err(&mmu->dev, "MMU %s: exmap reference count is not 0.\n"
			"   idx=%d, vadr=%p, order=%d, usecount=%d\n",
			mmu->name, idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing external device PTE entry */
	omap_mmu_clear_pte_entry(mmu, devadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(mmu, ent);
	total += size;

	/* we don't free PTEs */

	/* flush TLB */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	devadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	dev_err(&mmu->dev, "MMU %s: illegal exmap_tbl grouping!\n"
		"expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
		mmu->name, vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entry */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(mmu, ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long devadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, devadr);
	exmap_set_armmmu(mmu, (unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, devadr, phys);
	omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long devadr)
{
	void *virt = omap_mmu_to_virt(mmu, devadr);

	exmap_clear_armmmu(mmu, (unsigned long)virt, PAGE_SIZE);
	/* DSP MMU is shutting down. not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
#if defined(CONFIG_ARCH_OMAP2)	/* FIXME */
	int i;

	omap_mmu_write_reg(mmu, 0x2, OMAP_MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, OMAP_MMU_SYSSTATUS) & 0x1))
			break;
#endif
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	u32 val = OMAP_MMU_CNTL_MMU_EN | MMU_CNTL_TWLENABLE;

	if (likely(reset))
		omap_mmu_reset(mmu);
#if defined(CONFIG_ARCH_OMAP2)	/* FIXME */
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd),
			   OMAP_MMU_TTB);
#else
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) & 0xffff,
			   OMAP_MMU_TTB_L);
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) >> 16,
			   OMAP_MMU_TTB_H);
	val |= OMAP_MMU_CNTL_RESET_SW;
#endif
	omap_mmu_write_reg(mmu, val, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();
	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		dev_err(&mmu->dev, "MMU %s: failed to register MMU interrupt:"
			" %d\n", mmu->name, ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */
	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);
fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);

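/*
 * Typical pairing (mirrors omap_mmu_mem_read()/_write() below): bracket
 * each access to the memory window, and bail out if the window cannot
 * be taken:
 *
 *	if (omap_mmu_mem_enable(mmu, vadr) < 0)
 *		return -EBUSY;
 *	...access the window at vadr...
 *	omap_mmu_mem_disable(mmu, vadr);
 */
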
/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read = count;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		dev_err(&mmu->dev, "MMU %s: external device address %08lx / "
			"size %08zx is not valid!\n", mmu->name, p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf,
			    size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written = count;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		dev_err(&mmu->dev, "MMU %s: external device address %08lx "
			"/ size %08zx is not valid!\n", mmu->name, p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj,
				  struct bin_attribute *attr,
				  char *buf, loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};

/* To be made obsolete; kept for backward compatibility */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu,
			    struct bin_attribute *attr,
			    char *buf, loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev.kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu,
			     struct bin_attribute *attr,
			     char *buf, loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev.kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = -EIO;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	ssize_t len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  devadr     size         buf     size uc\n");
	         /* 0x300000 0x123000  0xc0171000 0x100000  0 */

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx, head;

		/* find the head of a link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		size = 0;
		head = idx = ent - mmu->exmap_tbl;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
			continue;
		}
		len += sprintf(buf + len, "\n");

		idx = head;
		do {
			ent = mmu->exmap_tbl + idx;
			len += sprintf(buf + len,
				       /*  0xc0171000 0x100000  0 */
				       "%19s0x%8p %#8lx %2d\n",
				       "", ent->buf,
				       (unsigned long)(PAGE_SIZE << ent->order),
				       ent->usecount);
		} while ((idx = ent->link.next) >= 0);
	}

	up_read(&mmu->exmap_sem);
	return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return (ssize_t)ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return (ssize_t)ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);

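/*
 * Illustrative sysfs usage (device path depends on the board): writing
 * "300000 10000" to the "exmap" attribute maps a fresh 64KB buffer at
 * external device address 0x300000, and writing "300000" alone (length
 * left as zero) unmaps it again; reading "exmap" back yields the table
 * dumped by exmap_show() above.
 */
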
static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M  = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K  = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "0x%x\n"
		       "1M  buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M,
		       min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

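/*
 * Sample output (hypothetical, after omap_mmu_kmem_reserve(mmu, SZ_1M * 2)
 * with nothing yet allocated from the pools):
 *
 *	0x200000
 *	1M  buffer: 2 (2 free)
 *	64K buffer: 0 (0 free)
 */
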
static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
	.name		= "mmu",
	.dev_release	= omap_mmu_class_dev_release,
};

int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev.class = &omap_mmu_class;
	strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
	dev_set_drvdata(&mmu->dev, mmu);

	mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		return -ENOMEM;

	mmu->twl_mm = mm_alloc();
	if (!mmu->twl_mm) {
		ret = -ENOMEM;
		goto err_mm_alloc;
	}

	ret = device_register(&mmu->dev);
	if (unlikely(ret))
		goto err_dev_register;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(&mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(&mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(&mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	device_unregister(&mmu->dev);
err_dev_register:
	__mmdrop(mmu->twl_mm);
	mmu->twl_mm = NULL;
err_mm_alloc:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);

void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(&mmu->dev, &dev_attr_mmu);
	device_remove_file(&mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(&mmu->dev,
				       &dev_attr_mem);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	if (mmu->twl_mm) {
		__mmdrop(mmu->twl_mm);
		mmu->twl_mm = NULL;
	}

	device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");