/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2006 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 * and Paul Mundt <lethal@linux-sh.org>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sizes.h>

#include <mach/dsp_common.h>

#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif

/*
 * On OMAP2, MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU implements base and victim in different bits of the LOCK register
 * (the shifts are still the same).  All of the other registers are the
 * same across all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_VICTIM_SHIFT	4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)
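
/*
 * Example (illustrative): a camera MMU LOCK value of 0x00000410 decodes,
 * with the shifts above, to base = (0x410 >> 10) & 0x7 = 1 and
 * victim = (0x410 >> 4) & 0x7 = 1, i.e. TLB entry 0 is locked down and
 * entry 1 is the next replacement victim.
 */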

#define is_aligned(adr, align)	(!((adr)&((align)-1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define MMU_CNTL_EMUTLBUPDATE	(1<<3)
#define MMU_CNTL_TWLENABLE	(1<<2)
#define MMU_CNTL_MMUENABLE	(1<<1)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;

#define omap_mmu_for_each_tlb_entry(mmu, entry)			\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),	\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);	\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)

static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request consecutive 1MB or 64kB blocks,
 * which is difficult to satisfy once memory pages have fragmented.
 * So the user can reserve such memory blocks early on,
 * through kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}

int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		dev_err(mmu->dev,
			"MMU %s: size(0x%lx) is not multiple of 64KB.\n",
			mmu->name, size);
		return -EINVAL;
	}

	if (size > (1 << mmu->addrspace)) {
		dev_err(mmu->dev,
			"MMU %s: size(0x%lx) is larger than external device"
			" memory space size (0x%x).\n", mmu->name, size,
			(1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size >= SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 20);
	}

	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);

		size &= ~(0xf << 16);
	}

	return len - size;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
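
/*
 * Usage sketch (illustrative, not part of this file): a board or driver
 * init path could earmark two 1MB blocks plus two 64KB blocks up front,
 * before physical memory fragments.  The return value is the number of
 * bytes actually set aside:
 *
 *	ret = omap_mmu_kmem_reserve(mmu, 2 * SZ_1M + 2 * SZ_64K);
 *	if (ret < 0)
 *		return ret;	(size was not a multiple of 64KB)
 *
 * Anything below the 64KB granularity is ignored, so the call above
 * reserves exactly 2MB + 128KB.
 */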

void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
int exmap_set_armmmu(struct omap_mmu *mmu, unsigned long virt,
		     unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	dev_dbg(mmu->dev,
		"MMU %s: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
		mmu->name, virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);

void exmap_clear_armmmu(struct omap_mmu *mmu, unsigned long virt,
			unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	dev_dbg(mmu->dev,
		"MMU %s: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
		mmu->name, virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers whole address. */
				return 1;
			} else {
				/*
				 * this map covers partially.
				 * check rest portion.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);
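
/*
 * Example (illustrative): with a 64KB map at device-side virtual address
 * 0xc0100000 and a 4KB map at 0xc0110000 in exmap_tbl,
 * exmap_valid(mmu, (void *)0xc010f000, 0x2000) returns 1: the first
 * entry covers 0xc010f000..0xc010ffff, and the scan restarts at "start"
 * to find the second entry covering the remaining 0x1000 bytes.
 */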

/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
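
/*
 * Pairing sketch (illustrative; foo_vm_open/foo_vm_close are hypothetical
 * vm_operations callbacks, not part of this file): a driver that mmap()s
 * an exmap region would bump the count in .open and drop it in .close,
 * so omap_mmu_exunmap() refuses to tear the region down while a user
 * mapping is live:
 *
 *	static void foo_vm_open(struct vm_area_struct *vma)
 *	{
 *		omap_mmu_exmap_use(mmu, vadr, vma->vm_end - vma->vm_start);
 *	}
 *
 *	static void foo_vm_close(struct vm_area_struct *vma)
 *	{
 *		omap_mmu_exmap_unuse(mmu, vadr, vma->vm_end - vma->vm_start);
 *	}
 */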

/*
 * omap_mmu_virt_to_phys()
 * returns physical address, and sets len to valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
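
/*
 * Usage sketch (illustrative): a caller preparing a transfer can
 * translate a device-side buffer address and clamp the length to the
 * bytes remaining in the containing map:
 *
 *	size_t valid;
 *	unsigned long pa = omap_mmu_virt_to_phys(mmu, vadr, &valid);
 *
 *	if (!pa)
 *		return -EFAULT;		(no mapping at vadr)
 *	count = min(count, valid);
 */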

/*
 * PTE operations
 */
static void
omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
		       unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
	if (virt & (1 << SECTION_SHIFT))
		pmdp++;
	*pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
	flush_pmd_entry(pmdp);
}

static void
omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
			    unsigned long phys, int prot)
{
	int i;
	for (i = 0; i < 16; i += 1) {
		omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
		virt += (PGDIR_SIZE / 2);
	}
}

static int
omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
		    unsigned long phys, pgprot_t prot)
{
	pte_t *ptep;
	pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (!(prot & PTE_TYPE_MASK))
		prot |= PTE_TYPE_SMALL;

	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(mm, virt);
		if (ptep == NULL)
			return -ENOMEM;
		pmd_populate_kernel(mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, virt);
	ptep -= PTRS_PER_PTE;
	*ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
	flush_pmd_entry((pmd_t *)ptep);

	return 0;
}

static int
omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
			 unsigned long phys, pgprot_t prot)
{
	int i, ret;
	for (i = 0; i < 16; i += 1) {
		ret = omap_mmu_alloc_page(mm, virt, phys,
					  prot | PTE_TYPE_LARGE);
		if (ret)
			return -ENOMEM; /* only 1st time */

		virt += PAGE_SIZE;
	}

	return 0;
}

static int omap_mmu_load_pte(struct omap_mmu *mmu,
			     struct omap_mmu_tlb_entry *e)
{
	int ret = 0;
	struct mm_struct *mm = mmu->twl_mm;
	const unsigned long va = e->va;
	const unsigned long pa = e->pa;
	const pgprot_t prot = mmu->ops->pte_get_attr(e);

	spin_lock(&mm->page_table_lock);

	switch (e->pgsz) {
	case OMAP_MMU_CAM_PAGESIZE_16MB:
		omap_mmu_alloc_supersection(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_1MB:
		omap_mmu_alloc_section(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_64KB:
		ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
		break;
	case OMAP_MMU_CAM_PAGESIZE_4KB:
		ret = omap_mmu_alloc_page(mm, va, pa, prot);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}

static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
{
	pte_t *ptep, *end;
	pmd_t *pmdp;
	struct mm_struct *mm = mmu->twl_mm;

	spin_lock(&mm->page_table_lock);

	pmdp = pmd_offset(pgd_offset(mm, virt), virt);

	if (pmd_none(*pmdp))
		goto out;

	if (!pmd_table(*pmdp))
		goto invalidate_pmd;

	ptep = pte_offset_kernel(pmdp, virt);
	pte_clear(mm, virt, ptep);
	flush_pmd_entry((pmd_t *)ptep);

	/* zap pte */
	end = pmd_page_vaddr(*pmdp);
	ptep = end - PTRS_PER_PTE;
	while (ptep < end) {
		if (!pte_none(*ptep))
			goto out;
		ptep++;
	}

	pte_free_kernel(mm, pmd_page_vaddr(*pmdp));

 invalidate_pmd:
	pmd_clear(pmdp);
	flush_pmd_entry(pmdp);
 out:
	spin_unlock(&mm->page_table_lock);
}

static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(mmu, entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}

static inline void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, OMAP_MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static inline void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT),
			   OMAP_MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set victim */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			break;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

	/* The last entry cannot be locked? */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		dev_err(mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
		ret = -EBUSY;
		goto fail;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr))
		return PTR_ERR(cr);

	omap_mmu_load_tlb(mmu, cr);
	kfree(cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);
fail:
	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
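
/*
 * Usage sketch (illustrative): pinning a single 4KB translation for a
 * device-visible buffer and dropping it again later.  INIT_TLB_ENTRY()
 * is the same helper omap_mmu_exmap() uses below:
 *
 *	struct omap_mmu_tlb_entry ent;
 *
 *	INIT_TLB_ENTRY(&ent, devadr, padr, OMAP_MMU_CAM_PAGESIZE_4KB);
 *	ret = omap_mmu_load_tlb_entry(mmu, &ent);
 *	...
 *	ret = omap_mmu_clear_tlb_entry(mmu, devadr);
 */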

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = -1;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	omap_mmu_write_reg(mmu, 0x1, OMAP_MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);
}

int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	int ret;

	/*XXX use PG_flag for prsvd */
	ret = omap_mmu_load_pte(mmu, entry);
	if (ret)
		return ret;
	if (entry->tlb)
		ret = omap_mmu_load_tlb_entry(mmu, entry);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);

int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
	if (ret)
		return ret;

	omap_mmu_clear_pte(mmu, vadr);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.
 * In this case, the buffer for the external device is allocated
 * in this routine, then it is mapped.
 * On the other hand, a caller such as frame buffer sharing calls
 * this function with padr set.  It means that some known address
 * space pointed to by padr is going to be shared with the external
 * device.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long devadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf = NULL;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _devadr = devadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, devadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		dev_err(mmu->dev,
			"MMU %s: size(0x%lx) is not multiple of 4KB.\n",
			mmu->name, size);
		return -EINVAL;
	}
	if (!is_aligned(devadr, MINIMUM_PAGESZ)) {
		dev_err(mmu->dev,
			"MMU %s: external device address(0x%lx) is not"
			" aligned.\n", mmu->name, devadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		dev_err(mmu->dev,
			"MMU %s: physical address(0x%lx) is not aligned.\n",
			mmu->name, padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((devadr < mmu->memsize) ||
	    (devadr >= (1 << mmu->addrspace))) {
		dev_err(mmu->dev,
			"MMU %s: illegal address/size for %s().\n",
			mmu->name, __func__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			dev_err(mmu->dev, "MMU %s: exmap page overlap!\n",
				mmu->name);
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines?  */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			break;
	if (idx == mmu->nr_tlb_entries) {
		dev_err(mmu->dev, "MMU %s: TLB is full.\n", mmu->name);
		status = -EBUSY;
		goto fail;
	}

	exmap_ent = mmu->exmap_tbl + idx;

	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_devadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_devadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
		if (buf == NULL) {
			status = -ENOMEM;
			goto fail;
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access to the allocated memory through 'buf'
	 * since this area should not be cached.
	 */
	status = exmap_set_armmmu(mmu, (unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading external device PTE entry */
	INIT_TLB_ENTRY(&tlb_ent, _devadr, _padr, pgsz);
	status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu(mmu, (unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_devadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, devadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
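
/*
 * Usage sketch (illustrative): mapping 1MB of freshly allocated kernel
 * memory at device address 0x300000, then sharing an existing frame
 * buffer (fb_paddr/fb_size are hypothetical names) at 0x400000:
 *
 *	ret = omap_mmu_exmap(mmu, 0x300000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *	ret = omap_mmu_exmap(mmu, 0x400000, fb_paddr, fb_size,
 *			     EXMAP_TYPE_FB);
 *	...
 *	ret = omap_mmu_exunmap(mmu, 0x300000);
 */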

static unsigned long unmap_free_arm(struct omap_mmu *mmu,
				    struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu(mmu, (unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		dev_dbg(mmu->dev, "MMU %s: freeing 0x%lx bytes @ adr 0x%8p\n",
			mmu->name, size, ent->buf);
	}

	ent->valid = 0;
	return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long devadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, devadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	dev_warn(mmu->dev, "MMU %s: address %06lx not found in exmap_tbl.\n",
		 mmu->name, devadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		dev_err(mmu->dev, "MMU %s: exmap reference count is not 0.\n"
			"   idx=%d, vadr=%p, order=%d, usecount=%d\n",
			mmu->name, idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing external device PTE entry */
	omap_mmu_clear_pte_entry(mmu, devadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(mmu, ent);
	total += size;

	/* we don't free PTEs */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	devadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	dev_err(mmu->dev, "MMU %s: illegal exmap_tbl grouping!\n"
		"expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
		mmu->name, vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entry */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(mmu, ent);

	/* flush TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long devadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, devadr);
	exmap_set_armmmu(mmu, (unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, devadr, phys);
	omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long devadr)
{
	void *virt = omap_mmu_to_virt(mmu, devadr);

	exmap_clear_armmmu(mmu, (unsigned long)virt, PAGE_SIZE);
	/* DSP MMU is shutting down. not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
#if defined(CONFIG_ARCH_OMAP2)	/* FIXME */
	int i;

	omap_mmu_write_reg(mmu, 0x2, OMAP_MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, OMAP_MMU_SYSSTATUS) & 0x1))
			break;
#endif
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	u32 val = OMAP_MMU_CNTL_MMU_EN | MMU_CNTL_TWLENABLE;

	if (likely(reset))
		omap_mmu_reset(mmu);
#if defined(CONFIG_ARCH_OMAP2)	/* FIXME */
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd),
			   OMAP_MMU_TTB);
#else
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) & 0xffff,
			   OMAP_MMU_TTB_L);
	omap_mmu_write_reg(mmu, (u32)virt_to_phys(mmu->twl_mm->pgd) >> 16,
			   OMAP_MMU_TTB_H);
	val |= OMAP_MMU_CNTL_RESET_SW;
#endif
	omap_mmu_write_reg(mmu, val, OMAP_MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
{
	struct omap_mmu *mmu = dev_id;

	if (likely(mmu->ops->interrupt))
		mmu->ops->interrupt(mmu);

	return IRQ_HANDLED;
}

static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	down_write(&mmu->exmap_sem);

	ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
			  mmu->name, mmu);
	if (ret < 0) {
		dev_err(mmu->dev, "MMU %s: failed to register MMU interrupt:"
			" %d\n", mmu->name, ret);
		goto fail;
	}

	omap_mmu_disable(mmu);	/* clear all */

	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);
fail:
	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);

	return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	free_irq(mmu->irq, mmu);

	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
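
/*
 * Callers are expected to bracket accesses to MMU-managed memory with
 * these calls (a sketch; vadr is whatever omap_mmu_to_virt() returned),
 * exactly as omap_mmu_mem_read()/omap_mmu_mem_write() below do:
 *
 *	if (omap_mmu_mem_enable(mmu, vadr) < 0)
 *		return -EBUSY;
 *	... access the memory ...
 *	omap_mmu_mem_disable(mmu, vadr);
 */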

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		dev_err(mmu->dev, "MMU %s: external device address %08lx / "
			"size %08x is not valid!\n", mmu->name, p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		dev_err(mmu->dev, "MMU %s: external device address %08lx "
			"/ size %08x is not valid!\n", mmu->name, p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj,
				  struct bin_attribute *attr,
				  char *buf, loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};

/* To be obsolete for backward compatibility */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu,
			    struct bin_attribute *attr,
			    char *buf, loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev->kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu,
			     struct bin_attribute *attr,
			     char *buf, loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev->kobj, attr, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

/*
 * sysfs files
 */
static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret;

	clk_enable(mmu->clk);
	ret = omap_dsp_request_mem();
	if (ret < 0)
		goto out;

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	ret = -EIO;
	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
out:
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	ssize_t len;
	int i;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  devadr     size         buf     size uc\n");
			 /* 0x300000 0x123000  0xc0171000 0x100000  0*/

	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size;
		enum exmap_type type;
		int idx;

		/* find a top of link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		size = 0;
		idx = i = ent - mmu->exmap_tbl;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
		} else {
			len += sprintf(buf + len, "\n");
			idx = i;
			do {
				ent = mmu->exmap_tbl + idx;
				len += sprintf(buf + len,
					    /* 0xc0171000 0x100000  0*/
					       "%19s0x%8p %#8lx %2d\n",
					       "", ent->buf,
					       PAGE_SIZE << ent->order,
					       ent->usecount);
			} while ((idx = ent->link.next) >= 0);
		}
	}

	up_read(&mmu->exmap_sem);
	return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf,
			   size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
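
/*
 * Example (illustrative, assuming a registered device named "dsp" under
 * the "mmu" class set up below): a 1MB mapping at device address
 * 0x300000 can be added, inspected and removed from user space:
 *
 *	# echo "300000 100000" > /sys/class/mmu/dsp/exmap
 *	# cat /sys/class/mmu/dsp/exmap
 *	# echo "300000" > /sys/class/mmu/dsp/exmap	(len omitted: unmap)
 */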

static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "total: %d bytes\n"
		       "1M buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
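
/*
 * Example (illustrative): the reservation made through
 * omap_mmu_kmem_reserve() is visible at class level:
 *
 *	# cat /sys/class/mmu/mempool
 */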

static struct class omap_mmu_class = {
	.name	= "mmu",
};

int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret = -ENOMEM;

	mmu->dev = device_create(&omap_mmu_class, NULL, 0, "%s", mmu->name);
	if (unlikely(IS_ERR(mmu->dev)))
		return PTR_ERR(mmu->dev);
	dev_set_drvdata(mmu->dev, mmu);

	mmu->exmap_tbl = kcalloc(mmu->nr_tlb_entries, sizeof(struct exmap_tbl),
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		goto err_kzalloc;

	mmu->twl_mm = mm_alloc();
	if (!mmu->twl_mm)
		goto err_mm_alloc;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(mmu->dev,
					     &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	__mmdrop(mmu->twl_mm);
	mmu->twl_mm = NULL;
err_mm_alloc:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
err_kzalloc:
	device_unregister(mmu->dev);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);
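
/*
 * Registration sketch (illustrative): a platform-specific front end such
 * as mach-omap1/mmu.c fills in an omap_mmu with its ops table, register
 * base, IRQ and TLB geometry, then hands it to this framework
 * (omap1_mmu_ops is a hypothetical name):
 *
 *	static struct omap_mmu mmu = {
 *		.name		= "dsp",
 *		.type		= OMAP_MMU_DSP,
 *		.nr_tlb_entries	= 32,
 *		.ops		= &omap1_mmu_ops,
 *	};
 *
 *	ret = omap_mmu_register(&mmu);
 *
 * omap_mmu_unregister() below undoes the registration in reverse order.
 */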

void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(mmu->dev, &dev_attr_mmu);
	device_remove_file(mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(mmu->dev, &dev_attr_mem);

	device_unregister(mmu->dev);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	if (mmu->twl_mm) {
		__mmdrop(mmu->twl_mm);
		mmu->twl_mm = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");