/*
 * linux/arch/arm/plat-omap/mmu.c
 *
 * OMAP MMU management framework
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 * and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/arch/mmu.h>
#include <asm/sizes.h>
#if defined(CONFIG_ARCH_OMAP1)
#include "../mach-omap1/mmu.h"
#elif defined(CONFIG_ARCH_OMAP2)
#include "../mach-omap2/mmu.h"
#endif
/*
 * On OMAP2, MMU_LOCK_xxx_MASK only applies to the IVA and DSP; the camera
 * MMU has base and victim implemented in different bits in the LOCK
 * register (the shifts are still the same). All of the other registers
 * are the same on all of the MMUs.
 */
#define MMU_LOCK_BASE_SHIFT		10
#define MMU_LOCK_VICTIM_SHIFT		4

#define CAMERA_MMU_LOCK_BASE_MASK	(0x7 << MMU_LOCK_BASE_SHIFT)
#define CAMERA_MMU_LOCK_VICTIM_MASK	(0x7 << MMU_LOCK_VICTIM_SHIFT)
#define is_aligned(adr, align)	(!((adr)&((align)-1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

static mempool_t *mempool_1M;
static mempool_t *mempool_64K;
#define omap_mmu_for_each_tlb_entry(mmu, entry)			\
	for (entry = mmu->exmap_tbl; prefetch(entry + 1),	\
	     entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);	\
	     entry++)

#define to_dev(obj)	container_of(obj, struct device, kobj)
static void *mempool_alloc_from_pool(mempool_t *pool,
				     unsigned int __nocast gfp_mask)
{
	spin_lock_irq(&pool->lock);
	if (likely(pool->curr_nr)) {
		void *element = pool->elements[--pool->curr_nr];
		spin_unlock_irq(&pool->lock);
		return element;
	}

	spin_unlock_irq(&pool->lock);
	return mempool_alloc(pool, gfp_mask);
}
/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may request consecutive 1MB or 64kB blocks, which become
 * difficult to satisfy once memory pages are fragmented. To avoid this,
 * the user can reserve such memory blocks in an early phase through
 * kmem_reserve().
 */
static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
{
	return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void omap_mmu_pool_free(void *buf, void *order)
{
	free_pages((unsigned long)buf, (unsigned int)order);
}
int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
{
	unsigned long len = size;

	/* alignment check */
	if (!is_aligned(size, SZ_64K)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
		return -EINVAL;
	}
	if (size > (1 << mmu->addrspace)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is larger than DSP memory space "
		       "size (0x%x).\n", size, (1 << mmu->addrspace));
		return -EINVAL;
	}

	if (size > SZ_1M) {
		int nr = size >> 20;

		if (likely(!mempool_1M))
			mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
						    omap_mmu_pool_free,
						    (void *)ORDER_1MB);
		else
			mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
				       GFP_KERNEL);
		size &= ~(0xf << 20);
	}
	if (size >= SZ_64K) {
		int nr = size >> 16;

		if (likely(!mempool_64K))
			mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
						     omap_mmu_pool_free,
						     (void *)ORDER_64KB);
		else
			mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
				       GFP_KERNEL);
		size &= ~(0xf << 16);
	}

	return len;
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
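
/*
 * Example (illustrative sketch, not part of the original driver): a
 * probe or board setup path could reserve 2MB plus 128KB up front, so
 * that later exmap() requests for contiguous buffers still succeed
 * after memory has fragmented; "dsp_mmu" is a hypothetical instance:
 *
 *	if (omap_mmu_kmem_reserve(&dsp_mmu, 2 * SZ_1M + 2 * SZ_64K) < 0)
 *		printk(KERN_WARNING "MMU: exmap pool reservation failed\n");
 */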

void omap_mmu_kmem_release(void)
{
	if (mempool_64K) {
		mempool_destroy(mempool_64K);
		mempool_64K = NULL;
	}

	if (mempool_1M) {
		mempool_destroy(mempool_1M);
		mempool_1M = NULL;
	}
}
EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);

static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
{
	struct page *page, *ps, *pe;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));

	for (page = ps; page < pe; page++)
		ClearPageReserved(page);

	if ((order == ORDER_64KB) && likely(mempool_64K))
		mempool_free((void *)buf, mempool_64K);
	else if ((order == ORDER_1MB) && likely(mempool_1M))
		mempool_free((void *)buf, mempool_1M);
	else
		free_pages(buf, order);
}

int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	printk(KERN_DEBUG
	       "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
	       virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_set_armmmu);
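
/*
 * Example (sketch): exmap_set_armmmu() and exmap_clear_armmmu() come in
 * pairs. Mapping one 64KB block at a made-up physical address looks like:
 *
 *	if (exmap_set_armmmu(virt, 0x10000000, SZ_64K) == 0) {
 *		... access the block through 'virt' ...
 *		exmap_clear_armmmu(virt, SZ_64K);
 *	}
 */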

void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	printk(KERN_DEBUG
	       "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
	       virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
}
EXPORT_SYMBOL_GPL(exmap_clear_armmmu);

int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	struct exmap_tbl *ent;

start:
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers whole address. */
				return 1;
			}
			/* this map covers partially; check the rest. */
			len -= mapadr + mapsize - vadr;
			vadr = mapadr + mapsize;
			goto start;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(exmap_valid);
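
/*
 * Example (sketch): exmem_read()/exmem_write() below rely on this check
 * to refuse accesses that are not fully covered by exmap mappings; the
 * caller must already hold exmap_sem, e.g. via omap_mmu_mem_enable():
 *
 *	if (omap_mmu_mem_enable(mmu, vadr) == 0) {
 *		int ok = exmap_valid(mmu, vadr, count);
 *		omap_mmu_mem_disable(mmu, vadr);
 *	}
 */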

/*
 * omap_mmu_exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);

void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
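
/*
 * Example (sketch): a hypothetical mmap() implementation would adjust
 * the use counts from its VMA open/close callbacks, so that the region
 * cannot be unmapped while user space still has it mapped:
 *
 *	omap_mmu_exmap_use(mmu, vadr, len);	(on vma open)
 *	omap_mmu_exmap_unuse(mmu, vadr, len);	(on vma close)
 */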

/*
 * omap_mmu_virt_to_phys()
 * returns physical address, and sets len to the valid length
 */
unsigned long
omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
{
	struct exmap_tbl *ent;

	if (omap_mmu_internal_memory(mmu, vadr)) {
		unsigned long addr = (unsigned long)vadr;
		*len = mmu->membase + mmu->memsize - addr;
		return addr;
	}

	/* external memory */
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *mapadr;
		unsigned long mapsize;

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + (vadr - mapadr);
		}
	}

	/* valid mapping not found */
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
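
/*
 * Example (sketch): since *len is clipped at each mapping boundary, a
 * caller translating a large range has to walk it chunk by chunk:
 *
 *	while (size > 0) {
 *		size_t chunk;
 *		unsigned long pa = omap_mmu_virt_to_phys(mmu, vadr, &chunk);
 *		if (pa == 0)
 *			break;		(no valid mapping here)
 *		if (chunk > size)
 *			chunk = size;
 *		... use the contiguous range [pa, pa + chunk) ...
 *		vadr += chunk;
 *		size -= chunk;
 *	}
 */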

static struct cam_ram_regset *
omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
	return mmu->ops->cam_ram_alloc(entry);
}

static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
				  struct cam_ram_regset *cr)
{
	return mmu->ops->cam_ram_valid(cr);
}

static void
omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
{
	unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
	int mask;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
	tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;

	mask = (mmu->type == OMAP_MMU_CAMERA) ?
			CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
	tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
}

static void
omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
{
	omap_mmu_write_reg(mmu,
			   (lock->base << MMU_LOCK_BASE_SHIFT) |
			   (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
}

static inline void omap_mmu_flush(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
}

static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
}

void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
		       struct cam_ram_regset *cr)
{
	/* set the victim entry to read from */
	omap_mmu_set_tlb_lock(mmu, lock);

	if (likely(mmu->ops->read_tlb))
		mmu->ops->read_tlb(mmu, cr);
}
EXPORT_SYMBOL_GPL(omap_mmu_read_tlb);

void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	if (likely(mmu->ops->load_tlb))
		mmu->ops->load_tlb(mmu, cr);

	/* flush the entry */
	omap_mmu_flush(mmu);

	/* load a TLB entry */
	omap_mmu_ldtlb(mmu);
}

int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
			    struct omap_mmu_tlb_entry *entry)
{
	struct omap_mmu_tlb_lock lock;
	struct cam_ram_regset *cr;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
		struct cam_ram_regset tmp;

		/* read a TLB entry */
		omap_mmu_read_tlb(mmu, &lock, &tmp);
		if (!omap_mmu_cam_ram_valid(mmu, &tmp))
			break;
	}
	omap_mmu_set_tlb_lock(mmu, &lock);

	/* The last entry cannot be locked? */
	if (lock.victim == (mmu->nr_tlb_entries - 1)) {
		printk(KERN_ERR "MMU: TLB is full.\n");
		ret = -EBUSY;
		goto fail;
	}

	cr = omap_mmu_cam_ram_alloc(mmu, entry);
	if (IS_ERR(cr)) {
		ret = PTR_ERR(cr);
		goto fail;
	}

	omap_mmu_load_tlb(mmu, cr);

	/* update lock base */
	if (lock.victim == lock.base)
		lock.base++;

	omap_mmu_set_tlb_lock(mmu, &lock);

fail:
	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
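
/*
 * Example (sketch): exmap_setup_preserved_mem_page() below is a real
 * caller; a one-off 4KB entry would be loaded like this, with dspadr
 * and physadr made up:
 *
 *	struct omap_mmu_tlb_entry ent;
 *
 *	INIT_TLB_ENTRY(&ent, dspadr, physadr, OMAP_MMU_CAM_PAGESIZE_4KB);
 *	if (omap_mmu_load_tlb_entry(mmu, &ent) < 0)
 *		... all TLB lines were valid and locked ...
 */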

static inline unsigned long
omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
	return mmu->ops->cam_va(cr);
}

int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
{
	struct omap_mmu_tlb_lock lock;
	int i;
	int max_valid = -1;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_get_tlb_lock(mmu, &lock);
	for (i = 0; i < lock.base; i++) {
		struct cam_ram_regset cr;

		/* read a TLB entry */
		lock.victim = i;
		omap_mmu_read_tlb(mmu, &lock, &cr);
		if (!omap_mmu_cam_ram_valid(mmu, &cr))
			continue;

		if (omap_mmu_cam_va(mmu, &cr) == vadr)
			/* flush the entry */
			omap_mmu_flush(mmu);
		else
			max_valid = i;
	}

	/* set new lock base */
	lock.base = lock.victim = max_valid + 1;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);

static void omap_mmu_gflush(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock lock;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
	lock.base = lock.victim = mmu->nr_exmap_preserved;
	omap_mmu_set_tlb_lock(mmu, &lock);

	omap_dsp_release_mem();
	clk_disable(mmu->clk);
}

/*
 * omap_mmu_exmap()
 *
 * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
 * In this case, the buffer for the DSP is allocated within this routine,
 * then it is mapped.
 * On the other hand, a caller such as frame buffer sharing calls
 * this function with padr set. It means that some known address space,
 * pointed to by padr, is going to be shared with the DSP.
 */
int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
		   unsigned long padr, unsigned long size,
		   enum exmap_type type)
{
	unsigned long pgsz;
	void *buf;
	unsigned int order = 0;
	unsigned long unit;
	int prev = -1;
	unsigned long _dspadr = dspadr;
	unsigned long _padr = padr;
	void *_vadr = omap_mmu_to_virt(mmu, dspadr);
	unsigned long _size = size;
	struct omap_mmu_tlb_entry tlb_ent;
	struct exmap_tbl *exmap_ent, *tmp_ent;
	int status;
	int idx;

#define MINIMUM_PAGESZ	SZ_4K
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
		return -EINVAL;
	}
	if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "MMU: physical address(0x%lx) is not aligned.\n",
		       padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((dspadr < mmu->memsize) ||
	    (dspadr >= (1 << mmu->addrspace))) {
		printk(KERN_ERR
		       "MMU: illegal address/size for %s().\n",
		       __FUNCTION__);
		return -EINVAL;
	}

	down_write(&mmu->exmap_sem);

	/* overlap check */
	omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
		unsigned long mapsize;

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			printk(KERN_ERR "MMU: exmap page overlap!\n");
			up_write(&mmu->exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;

	/* Are there any free TLB lines? */
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
		if (!mmu->exmap_tbl[idx].valid)
			goto found_free;

	printk(KERN_ERR "MMU: DSP TLB is full.\n");
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = mmu->exmap_tbl + idx;

	if ((_size >= SZ_1M) &&
	    (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
	    is_aligned(_dspadr, SZ_1M)) {
		unit = SZ_1M;
		pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
	} else if ((_size >= SZ_64K) &&
		   (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
		   is_aligned(_dspadr, SZ_64K)) {
		unit = SZ_64K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
	} else {
		unit = SZ_4K;
		pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
	}

	order = get_order(unit);

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		if ((order == ORDER_1MB) && likely(mempool_1M))
			buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
		else if ((order == ORDER_64KB) && likely(mempool_64K))
			buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
		else
			buf = (void *)__get_dma_pages(GFP_KERNEL, order);
		if (buf == NULL) {
			status = -ENOMEM;
			goto fail;
		}

		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);

		for (page = ps; page < pe; page++)
			SetPageReserved(page);

		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access the allocated memory through 'buf',
	 * since this area must not be cached.
	 */
	status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading DSP TLB entry */
	INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
	status = omap_mmu_load_tlb_entry(mmu, &tlb_ent);
	if (status < 0) {
		exmap_clear_armmmu((unsigned long)_vadr, unit);
		goto fail;
	}

	INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
	exmap_ent->link.prev = prev;
	if (prev >= 0)
		mmu->exmap_tbl[prev].link.next = idx;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&mmu->exmap_sem);
		return size;
	}

	_dspadr += unit;
	_vadr += unit;
	_padr = padr ? _padr + unit : 0;
	prev = idx;
	goto start;

fail:
	up_write(&mmu->exmap_sem);
	if (buf)
		omap_mmu_free_pages((unsigned long)buf, order);
	omap_mmu_exunmap(mmu, dspadr);
	return status;
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap);
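
/*
 * Example (sketch): the two calling conventions described above, with
 * made-up addresses ("fb_paddr" is a hypothetical frame buffer address):
 *
 *	(1) anonymous buffer, allocated here, mapped at DSP adr 0x200000:
 *		omap_mmu_exmap(mmu, 0x200000, 0, SZ_1M, EXMAP_TYPE_MEM);
 *
 *	(2) share an existing physical region with the DSP:
 *		omap_mmu_exmap(mmu, 0x300000, fb_paddr, SZ_1M, EXMAP_TYPE_FB);
 */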

static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu((unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
		printk(KERN_DEBUG
		       "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
		       size, ent->buf);
	}

	ent->valid = 0;
	return size;
}

int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = omap_mmu_to_virt(mmu, dspadr);
	down_write(&mmu->exmap_sem);
	for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
		ent = mmu->exmap_tbl + idx;
		if (!ent->valid || ent->prsvd)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&mmu->exmap_sem);
	printk(KERN_WARNING
	       "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "MMU: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&mmu->exmap_sem);
		return -EINVAL;
	}
	/* clearing DSP TLB entry */
	omap_mmu_clear_tlb_entry(mmu, dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	total += size;

	/* we don't free PTEs */
	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	idx = ent->link.next;
	if (idx < 0)
		goto up_out;	/* normal completion */
	ent = mmu->exmap_tbl + idx;
	dspadr += size;
	vadr += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	printk(KERN_ERR
	       "MMU: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&mmu->exmap_sem);
	return -EINVAL;

up_out:
	up_write(&mmu->exmap_sem);
	return total;
}
EXPORT_SYMBOL_GPL(omap_mmu_exunmap);

void omap_mmu_exmap_flush(struct omap_mmu *mmu)
{
	struct exmap_tbl *ent;

	down_write(&mmu->exmap_sem);

	/* clearing TLB entries */
	omap_mmu_gflush(mmu);

	omap_mmu_for_each_tlb_entry(mmu, ent)
		if (ent->valid && !ent->prsvd)
			unmap_free_arm(ent);

	/* flush the ARM TLB */
	if (likely(mmu->membase))
		flush_tlb_kernel_range(mmu->membase + mmu->memsize,
				       mmu->membase + (1 << mmu->addrspace));

	up_write(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);

void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
				    unsigned long dspadr, int index)
{
	unsigned long phys;
	void *virt;
	struct omap_mmu_tlb_entry tlb_ent;

	phys = __pa(buf);
	virt = omap_mmu_to_virt(mmu, dspadr);
	exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
	INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
	INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
	omap_mmu_load_tlb_entry(mmu, &tlb_ent);
}
EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
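
/*
 * Example (sketch): a DSP runtime could pin a vector page at DSP
 * address 0 with a preserved 4KB entry; "vector_page" is a hypothetical
 * page-sized kernel buffer:
 *
 *	exmap_setup_preserved_mem_page(mmu, vector_page, 0x0, 0);
 *	...
 *	exmap_clear_mem_page(mmu, 0x0);
 */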

void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
{
	void *virt = omap_mmu_to_virt(mmu, dspadr);

	exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
	/* DSP MMU is shutting down. not handled here. */
}
EXPORT_SYMBOL_GPL(exmap_clear_mem_page);

static void omap_mmu_reset(struct omap_mmu *mmu)
{
	int i;

	omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);

	for (i = 0; i < 10000; i++)
		if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
			break;
}

void omap_mmu_disable(struct omap_mmu *mmu)
{
	omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_disable);

void omap_mmu_enable(struct omap_mmu *mmu, int reset)
{
	if (likely(reset))
		omap_mmu_reset(mmu);

	omap_mmu_write_reg(mmu, 0x2, MMU_CNTL);
}
EXPORT_SYMBOL_GPL(omap_mmu_enable);

static int omap_mmu_init(struct omap_mmu *mmu)
{
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = 0;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();
	down_write(&mmu->exmap_sem);

	omap_mmu_disable(mmu);	/* clear all */
	omap_mmu_enable(mmu, 1);

	memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	if (unlikely(mmu->ops->startup))
		ret = mmu->ops->startup(mmu);

	up_write(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static void omap_mmu_shutdown(struct omap_mmu *mmu)
{
	if (unlikely(mmu->ops->shutdown))
		mmu->ops->shutdown(mmu);

	omap_mmu_exmap_flush(mmu);
	omap_mmu_disable(mmu);	/* clear all */
}

/*
 * omap_mmu_mem_enable() / disable()
 */
int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_enable))
		return mmu->ops->mem_enable(mmu, addr);

	down_read(&mmu->exmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);

void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
	if (unlikely(mmu->ops->mem_disable)) {
		mmu->ops->mem_disable(mmu, addr);
		return;
	}

	up_read(&mmu->exmap_sem);
}
EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
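
/*
 * Example (sketch): every direct access to DSP-visible memory is
 * bracketed by these calls, exactly as omap_mmu_mem_read()/write()
 * below do:
 *
 *	if (omap_mmu_mem_enable(mmu, vadr) < 0)
 *		return -EBUSY;
 *	... touch the memory behind vadr ...
 *	omap_mmu_mem_disable(mmu, vadr);
 */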

/*
 * dsp_mem file operations
 */
static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_disable(mmu->memclk);
	return read;
}

static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
				 loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_read(mmu, buf, count, &offset);
	else
		ret = exmem_read(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t size = mmu->memsize;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_enable(mmu->memclk);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_disable(mmu->memclk);
	return written;
}

static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = omap_mmu_to_virt(mmu, p);

	if (!exmap_valid(mmu, vadr, count)) {
		printk(KERN_ERR
		       "MMU: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > (1 << mmu->addrspace) - p)
		count = (1 << mmu->addrspace) - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}

static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
				  loff_t offset, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long p = (unsigned long)offset;
	void *vadr = omap_mmu_to_virt(mmu, p);
	ssize_t ret;

	if (omap_mmu_mem_enable(mmu, vadr) < 0)
		return -EBUSY;

	if (p < mmu->memsize)
		ret = intmem_write(mmu, buf, count, &offset);
	else
		ret = exmem_write(mmu, buf, count, &offset);

	omap_mmu_mem_disable(mmu, vadr);

	return ret;
}

static struct bin_attribute dev_attr_mem = {
	.attr	= {
		.name	= "mem",
		.owner	= THIS_MODULE,
		.mode	= S_IRUSR | S_IWUSR | S_IRGRP,
	},

	.read	= omap_mmu_mem_read,
	.write	= omap_mmu_mem_write,
};

/* Kept for backward compatibility; to be obsoleted. */
ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
			    loff_t offset, size_t count)
{
	return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);

ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
			     loff_t offset, size_t count)
{
	return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
}
EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);

static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct omap_mmu_tlb_lock tlb_lock;
	int ret = -EIO;

	clk_enable(mmu->clk);
	omap_dsp_request_mem();

	down_read(&mmu->exmap_sem);

	omap_mmu_get_tlb_lock(mmu, &tlb_lock);

	if (likely(mmu->ops->show))
		ret = mmu->ops->show(mmu, buf, &tlb_lock);

	/* restore victim entry */
	omap_mmu_set_tlb_lock(mmu, &tlb_lock);

	up_read(&mmu->exmap_sem);
	omap_dsp_release_mem();
	clk_disable(mmu->clk);

	return ret;
}

static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	struct exmap_tbl *ent;
	ssize_t len;

	down_read(&mmu->exmap_sem);
	len = sprintf(buf, "  dspadr     size         buf     size uc\n");
			 /* 0x300000 0x123000  0xc0171000 0x100000  0*/
	omap_mmu_for_each_tlb_entry(mmu, ent) {
		void *vadr;
		unsigned long size = 0;
		enum exmap_type type;
		int idx, head;

		/* find a top of link */
		if (!ent->valid || (ent->link.prev >= 0))
			continue;

		vadr = ent->vadr;
		type = ent->type;
		head = idx = ent - mmu->exmap_tbl;
		do {
			ent = mmu->exmap_tbl + idx;
			size += PAGE_SIZE << ent->order;
		} while ((idx = ent->link.next) >= 0);

		len += sprintf(buf + len, "0x%06lx %#8lx",
			       virt_to_omap_mmu(mmu, vadr), size);

		if (type == EXMAP_TYPE_FB) {
			len += sprintf(buf + len, "    framebuf\n");
		} else {
			len += sprintf(buf + len, "\n");
			idx = head;
			do {
				ent = mmu->exmap_tbl + idx;
				len += sprintf(buf + len,
					       /* 0xc0171000 0x100000  0*/
					       "%19s0x%8p %#8lx %2d\n",
					       "", ent->buf,
					       PAGE_SIZE << ent->order,
					       ent->usecount);
			} while ((idx = ent->link.next) >= 0);
		}
	}

	up_read(&mmu->exmap_sem);
	return len;
}

static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct omap_mmu *mmu = dev_get_drvdata(dev);
	unsigned long base = 0, len = 0;
	int ret;

	sscanf(buf, "%lx %lx", &base, &len);

	if (!base)
		return -EINVAL;

	if (len) {
		/* Add the mapping */
		ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
		if (ret < 0)
			return ret;
	} else {
		/* Remove the mapping */
		ret = omap_mmu_exunmap(mmu, base);
		if (ret < 0)
			return ret;
	}

	return count;
}

static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
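
/*
 * Example (sketch): from user space a mapping can be added or removed
 * through this attribute; the input is "<dspadr> <len>" in hex, and
 * len=0 removes the mapping (the exact sysfs path depends on the class
 * and device names), e.g.:
 *
 *	echo "300000 100000" > /sys/class/mmu/<name>/exmap
 *	echo "300000 0"      > /sys/class/mmu/<name>/exmap
 */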

static ssize_t mempool_show(struct class *class, char *buf)
{
	int min_nr_1M = 0, curr_nr_1M = 0;
	int min_nr_64K = 0, curr_nr_64K = 0;
	int total = 0;

	if (likely(mempool_1M)) {
		min_nr_1M = mempool_1M->min_nr;
		curr_nr_1M = mempool_1M->curr_nr;
		total += min_nr_1M * SZ_1M;
	}
	if (likely(mempool_64K)) {
		min_nr_64K = mempool_64K->min_nr;
		curr_nr_64K = mempool_64K->curr_nr;
		total += min_nr_64K * SZ_64K;
	}

	return sprintf(buf,
		       "total: %d\n"
		       "1M buffer: %d (%d free)\n"
		       "64K buffer: %d (%d free)\n",
		       total, min_nr_1M, curr_nr_1M,
		       min_nr_64K, curr_nr_64K);
}

static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);

static void omap_mmu_class_dev_release(struct device *dev)
{
}

static struct class omap_mmu_class = {
	.name		= "mmu",
	.dev_release	= omap_mmu_class_dev_release,
};

int omap_mmu_register(struct omap_mmu *mmu)
{
	int ret;

	mmu->dev.class = &omap_mmu_class;
	strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
	dev_set_drvdata(&mmu->dev, mmu);

	mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
				 GFP_KERNEL);
	if (!mmu->exmap_tbl)
		return -ENOMEM;

	ret = device_register(&mmu->dev);
	if (unlikely(ret))
		goto err_dev_register;

	init_rwsem(&mmu->exmap_sem);

	ret = omap_mmu_read_reg(mmu, MMU_REVISION);
	printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
	       mmu->name, (ret >> 4) & 0xf, ret & 0xf);

	ret = omap_mmu_init(mmu);
	if (unlikely(ret))
		goto err_mmu_init;

	ret = device_create_file(&mmu->dev, &dev_attr_mmu);
	if (unlikely(ret))
		goto err_dev_create_mmu;
	ret = device_create_file(&mmu->dev, &dev_attr_exmap);
	if (unlikely(ret))
		goto err_dev_create_exmap;

	if (likely(mmu->membase)) {
		dev_attr_mem.size = mmu->memsize;
		ret = device_create_bin_file(&mmu->dev, &dev_attr_mem);
		if (unlikely(ret))
			goto err_bin_create_mem;
	}

	return 0;

err_bin_create_mem:
	device_remove_file(&mmu->dev, &dev_attr_exmap);
err_dev_create_exmap:
	device_remove_file(&mmu->dev, &dev_attr_mmu);
err_dev_create_mmu:
	omap_mmu_shutdown(mmu);
err_mmu_init:
	device_unregister(&mmu->dev);
err_dev_register:
	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(omap_mmu_register);

void omap_mmu_unregister(struct omap_mmu *mmu)
{
	omap_mmu_shutdown(mmu);
	omap_mmu_kmem_release();

	device_remove_file(&mmu->dev, &dev_attr_mmu);
	device_remove_file(&mmu->dev, &dev_attr_exmap);

	if (likely(mmu->membase))
		device_remove_bin_file(&mmu->dev, &dev_attr_mem);

	kfree(mmu->exmap_tbl);
	mmu->exmap_tbl = NULL;

	device_unregister(&mmu->dev);
}
EXPORT_SYMBOL_GPL(omap_mmu_unregister);

static int __init omap_mmu_class_init(void)
{
	int ret = class_register(&omap_mmu_class);
	if (!ret)
		ret = class_create_file(&omap_mmu_class, &class_attr_mempool);

	return ret;
}

static void __exit omap_mmu_class_exit(void)
{
	class_remove_file(&omap_mmu_class, &class_attr_mempool);
	class_unregister(&omap_mmu_class);
}

subsys_initcall(omap_mmu_class_init);
module_exit(omap_mmu_class_exit);

MODULE_LICENSE("GPL");