/*
 * linux/arch/arm/mach-omap/dsp/dsp_mem.c
 *
 * OMAP DSP memory driver
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 * 2005/06/09:  DSP Gateway version 3.3
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/fb.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/ioctls.h>
#include <asm/irq.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/hardware/clock.h>
#include <asm/arch/tc.h>
#include <asm/arch/dsp.h>
#include <asm/arch/dsp_common.h>
#include "uaccess_dsp.h"
#include "dsp.h"

#define SZ_1MB	0x100000
#define SZ_64KB	0x10000
#define SZ_4KB	0x1000
#define SZ_1KB	0x400
#define is_aligned(adr,align)	(!((adr)&((align)-1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(addr)	(((addr)+PGDIR_SIZE-1)&(PGDIR_MASK))

#define dsp_mmu_enable() \
        do { \
                omap_writew(DSPMMU_CNTL_MMU_EN | DSPMMU_CNTL_RESET_SW, \
                            DSPMMU_CNTL); \
        } while(0)
#define dsp_mmu_disable() \
        do { omap_writew(0, DSPMMU_CNTL); } while(0)
#define dsp_mmu_flush() \
        do { \
                omap_writew(DSPMMU_FLUSH_ENTRY_FLUSH_ENTRY, \
                            DSPMMU_FLUSH_ENTRY); \
        } while(0)
#define __dsp_mmu_gflush() \
        do { omap_writew(DSPMMU_GFLUSH_GFLUSH, DSPMMU_GFLUSH); } while(0)
#define __dsp_mmu_itack() \
        do { omap_writew(DSPMMU_IT_ACK_IT_ACK, DSPMMU_IT_ACK); } while(0)

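/*
 * Programming model of the DSP MMU as used below (a summary inferred
 * from this file, not taken from the TRM): a TLB entry is staged in
 * four 16-bit registers -- DSPMMU_CAM_H/CAM_L hold the virtual-address
 * tag, section size (SLST), preserved (P) and valid (V) bits, while
 * DSPMMU_RAM_H/RAM_L hold the physical address and access permission
 * (AP).  Writing DSPMMU_LD_TLB_LD to DSPMMU_LD_TLB then loads the
 * staged entry into the line selected by the victim field of
 * DSPMMU_LOCK; DSPMMU_LD_TLB_RD reads a line back the same way.
 */
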
#define EMIF_PRIO_LB_MASK	0x0000f000
#define EMIF_PRIO_LB_SHIFT	12
#define EMIF_PRIO_DMA_MASK	0x00000f00
#define EMIF_PRIO_DMA_SHIFT	8
#define EMIF_PRIO_DSP_MASK	0x00000070
#define EMIF_PRIO_DSP_SHIFT	4
#define EMIF_PRIO_MPU_MASK	0x00000007
#define EMIF_PRIO_MPU_SHIFT	0
#define set_emiff_dma_prio(prio) \
        do { \
                omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
                             ~EMIF_PRIO_DMA_MASK) | \
                            ((prio) << EMIF_PRIO_DMA_SHIFT), \
                            OMAP_TC_OCPT1_PRIOR); \
        } while(0)

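/*
 * Example: set_emiff_dma_prio(15) performs a read-modify-write of
 * OMAP_TC_OCPT1_PRIOR, replacing only bits [11:8] (the DMA priority
 * field) with 0xf and leaving the LB/DSP/MPU fields untouched.
 * dsp_fbexport() below uses exactly this to raise the DMA priority
 * while the frame buffer is shared with the DSP.
 */
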
struct exmap_tbl {
        unsigned int valid:1;
        unsigned int cntnu:1;   /* grouping */
        int usecount;           /* reference count by mmap */
        enum exmap_type type;
        void *buf;              /* virtual address of the buffer,
                                 * i.e. 0xc0000000 - */
        void *vadr;             /* DSP shadow space,
                                 * i.e. 0xe0000000 - 0xe0ffffff */
        unsigned int order;
};
#define DSPMMU_TLB_LINES	32
static struct exmap_tbl exmap_tbl[DSPMMU_TLB_LINES];
static DECLARE_RWSEM(exmap_sem);

static int dsp_exunmap(unsigned long dspadr);

static void *dspvect_page;
static unsigned long dsp_fault_adr;
static struct mem_sync_struct mem_sync;

static __inline__ unsigned long lineup_offset(unsigned long adr,
                                              unsigned long ref,
                                              unsigned long mask)
{
        unsigned long newadr;

        newadr = (adr & ~mask) | (ref & mask);
        if (newadr < adr)
                newadr += mask + 1;
        return newadr;
}

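/*
 * Worked example: lineup_offset(0x230000, 0x10171000, SZ_1MB-1)
 * keeps the 1MB-aligned part of adr (0x200000) and takes the offset
 * within the 1MB section from ref (0x71000), giving 0x271000; since
 * that is not below adr, no wrap-around correction is needed.  This
 * makes the DSP address share its in-section offset with the physical
 * address so that large MMU sections can be used.
 */
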
void dsp_mem_sync_inc(void)
{
        /*
         * FIXME: dsp_mem_enable()!!!
         */
        if (mem_sync.DARAM)
                mem_sync.DARAM->ad_arm++;
        if (mem_sync.SARAM)
                mem_sync.SARAM->ad_arm++;
        if (mem_sync.SDRAM)
                mem_sync.SDRAM->ad_arm++;
}

/*
 * dsp_mem_sync_config() is called from the mbx1 workqueue
 */
int dsp_mem_sync_config(struct mem_sync_struct *sync)
{
        size_t sync_seq_sz = sizeof(struct sync_seq);

#ifdef OLD_BINARY_SUPPORT
        if (sync == NULL) {
                memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
                return 0;
        }
#endif
        if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
            (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
            (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
                printk(KERN_ERR
                       "omapdsp: mem_sync address validation failure!\n"
                       "  mem_sync.DARAM = 0x%p,\n"
                       "  mem_sync.SARAM = 0x%p,\n"
                       "  mem_sync.SDRAM = 0x%p,\n",
                       sync->DARAM, sync->SARAM, sync->SDRAM);
                return -1;
        }

        memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
        return 0;
}

/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() might request consecutive 1MB or 64kB blocks,
 * but such allocations become difficult once memory pages are
 * fragmented.  So the user can reserve such memory blocks in the
 * early phase through kmem_reserve().
 */
struct kmem_pool {
        struct semaphore sem;
        unsigned long buf[16];
        int count;
};

#define KMEM_POOL_INIT(name) \
{ \
        .sem = __SEMAPHORE_INIT((name).sem, 1), \
}
#define DECLARE_KMEM_POOL(name) \
        struct kmem_pool name = KMEM_POOL_INIT(name)

DECLARE_KMEM_POOL(kmem_pool_1M);
DECLARE_KMEM_POOL(kmem_pool_64K);

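/*
 * Pool bookkeeping, as reconstructed here: buf[] holds up to 16
 * page-block addresses and a zero slot means "no block"; count is
 * raised as kmem_reserve() fills slots and reset by kmem_release(),
 * so the loops below treat it as the high-water mark of slots worth
 * scanning.
 */
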
static void dsp_kmem_release(void)
{
        int i;

        down(&kmem_pool_1M.sem);
        for (i = 0; i < kmem_pool_1M.count; i++) {
                if (kmem_pool_1M.buf[i])
                        free_pages(kmem_pool_1M.buf[i], ORDER_1MB);
        }
        kmem_pool_1M.count = 0;
        up(&kmem_pool_1M.sem);

        down(&kmem_pool_64K.sem);
        for (i = 0; i < kmem_pool_64K.count; i++) {
                if (kmem_pool_64K.buf[i])
                        free_pages(kmem_pool_64K.buf[i], ORDER_64KB);
        }
        kmem_pool_64K.count = 0;
        up(&kmem_pool_64K.sem);	/* was kmem_pool_1M.sem: wrong semaphore */
}

static int dsp_kmem_reserve(unsigned long size)
{
        unsigned long buf;
        unsigned int order;
        unsigned long unit;
        unsigned long _size;
        struct kmem_pool *pool;
        int i;

        /* alignment check */
        if (!is_aligned(size, SZ_64KB)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
                return -EINVAL;
        }
        if (size > DSPSPACE_SIZE) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is larger than DSP memory space "
                       "size (0x%x).\n", size, DSPSPACE_SIZE);
                return -EINVAL;
        }

        for (_size = size; _size; _size -= unit) {
                if (_size >= SZ_1MB) {
                        unit = SZ_1MB;
                        order = ORDER_1MB;
                        pool = &kmem_pool_1M;
                } else {
                        unit = SZ_64KB;
                        order = ORDER_64KB;
                        pool = &kmem_pool_64K;
                }

                buf = __get_dma_pages(GFP_KERNEL, order);
                if (!buf)
                        return size - _size;

                down(&pool->sem);
                for (i = 0; i < 16; i++) {
                        if (pool->buf[i] == 0) {
                                pool->buf[i] = buf;
                                pool->count++;
                                buf = 0;
                                break;
                        }
                }
                up(&pool->sem);

                if (buf) {	/* pool is full */
                        free_pages(buf, order);
                        return size - _size;
                }
        }

        return size;
}

static unsigned long dsp_mem_get_dma_pages(unsigned int order)
{
        struct kmem_pool *pool;
        unsigned long buf = 0;
        int i;

        switch (order) {
        case ORDER_1MB:
                pool = &kmem_pool_1M;
                break;
        case ORDER_64KB:
                pool = &kmem_pool_64K;
                break;
        default:
                pool = NULL;
        }

        if (pool) {
                down(&pool->sem);
                for (i = 0; i < pool->count; i++) {
                        if (pool->buf[i]) {
                                buf = pool->buf[i];
                                pool->buf[i] = 0;
                                break;
                        }
                }
                up(&pool->sem);
                if (buf)
                        return buf;
        }

        /* other size or not found in pool */
        return __get_dma_pages(GFP_KERNEL, order);
}

static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
{
        struct kmem_pool *pool;
        struct page *page, *ps, *pe;
        int i;

        ps = virt_to_page(buf);
        pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
        for (page = ps; page < pe; page++) {
                ClearPageReserved(page);
        }

        /*
         * return buffer to kmem_pool or paging system
         */
        switch (order) {
        case ORDER_1MB:
                pool = &kmem_pool_1M;
                break;
        case ORDER_64KB:
                pool = &kmem_pool_64K;
                break;
        default:
                pool = NULL;
        }

        if (pool) {
                down(&pool->sem);
                for (i = 0; i < pool->count; i++) {
                        if (pool->buf[i] == 0) {
                                pool->buf[i] = buf;
                                buf = 0;
                                break;
                        }
                }
                up(&pool->sem);
        }

        /* other size or pool is filled */
        if (buf)
                free_pages(buf, order);
}

/*
 * ARM MMU operations
 */
static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
                            unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        printk(KERN_DEBUG
               "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte(ptep, __pte((virt + off) | prot_pte));
        }

        return 0;
}

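/*
 * Note on the mapping above: off = phys - virt is constant over the
 * whole range, so every PTE maps virt+off and the window is linear.
 * Looking pmdp up once outside the loop appears sufficient because
 * exmap units are 1MB or smaller and naturally aligned, so a single
 * mapping never crosses the 2MB region covered by one kernel PMD.
 */
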
static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;

        printk(KERN_DEBUG
               "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
               virt, size);

        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                pmdp = pmd_offset(pgd_offset_k(virt), virt);
                ptep = pte_offset_kernel(pmdp, virt);
                pte_clear(&init_mm, virt, ptep);
        }
}

static int exmap_valid(void *vadr, size_t len)
{
        /* exmap_sem should be held before calling this function */
        int i;

start:
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                void *mapadr;
                unsigned long mapsize;
                struct exmap_tbl *ent = &exmap_tbl[i];

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        if (vadr + len <= mapadr + mapsize) {
                                /* this map covers whole address. */
                                return 1;
                        } else {
                                /*
                                 * this map covers partially.
                                 * check the rest portion.
                                 */
                                len -= mapadr + mapsize - vadr;
                                vadr = mapadr + mapsize;
                                goto start;
                        }
                }
        }

        return 0;
}

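/*
 * Example: with two adjacent 64KB mappings at 0xe0300000 and
 * 0xe0310000, exmap_valid(0xe0308000, 0x10000) first matches the
 * lower entry, which covers only 0x8000 bytes of the request; the
 * loop then restarts with vadr = 0xe0310000, len = 0x8000, matches
 * the upper entry and returns 1.  Any hole in the chain makes it
 * fall through and return 0.
 */
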
enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
{
        void *ds = (void *)daram_base;
        void *de = (void *)daram_base + daram_size;
        void *ss = (void *)saram_base;
        void *se = (void *)saram_base + saram_size;
        int ret;

        if ((vadr >= ds) && (vadr < de)) {
                if (vadr + len > de)
                        return MEM_TYPE_CROSSING;
                else
                        return MEM_TYPE_DARAM;
        } else if ((vadr >= ss) && (vadr < se)) {
                if (vadr + len > se)
                        return MEM_TYPE_CROSSING;
                else
                        return MEM_TYPE_SARAM;
        } else {
                down_read(&exmap_sem);
                if (exmap_valid(vadr, len))
                        ret = MEM_TYPE_EXTERN;
                else
                        ret = MEM_TYPE_NONE;
                up_read(&exmap_sem);
                return ret;
        }
}

int dsp_address_validate(void *p, size_t len, char *fmt, ...)
{
        if (dsp_mem_type(p, len) <= 0) {
                if (fmt != NULL) {
                        char s[64];
                        va_list args;

                        va_start(args, fmt);
                        vsprintf(s, fmt, args);
                        va_end(args);
                        printk(KERN_ERR
                               "omapdsp: %s address(0x%p) and size(0x%x) is "
                               "not valid!\n"
                               "         (crossing different type of memories, or \n"
                               "          external memory space where no "
                               "actual memory is mapped)\n",
                               s, p, len);
                }
                return -1;
        }

        return 0;
}

/*
 * exmap_use(), unuse():
 * when the mapped area is exported to user space with mmap,
 * the usecount is incremented.
 * while the usecount > 0, that area can't be released.
 */
void exmap_use(void *vadr, size_t len)
{
        int i;

        down_write(&exmap_sem);
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                void *mapadr;
                unsigned long mapsize;
                struct exmap_tbl *ent = &exmap_tbl[i];

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount++;
        }
        up_write(&exmap_sem);
}

void exmap_unuse(void *vadr, size_t len)
{
        int i;

        down_write(&exmap_sem);
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                void *mapadr;
                unsigned long mapsize;
                struct exmap_tbl *ent = &exmap_tbl[i];

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
                        ent->usecount--;
        }
        up_write(&exmap_sem);
}

/*
 * dsp_virt_to_phys()
 * returns physical address, and sets len to the valid length
 */
unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
{
        int i;

        if (is_dsp_internal_mem(vadr)) {
                /* DARAM / SARAM */
                *len = dspmem_base + dspmem_size - (unsigned long)vadr;
                return (unsigned long)vadr;
        }

        /* external memory */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                void *mapadr;
                unsigned long mapsize;
                struct exmap_tbl *ent = &exmap_tbl[i];

                if (!ent->valid)
                        continue;
                mapadr = (void *)ent->vadr;
                mapsize = 1 << (ent->order + PAGE_SHIFT);
                if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
                        *len = mapadr + mapsize - vadr;
                        return __pa(ent->buf) + vadr - mapadr;
                }
        }

        /* valid mapping not found */
        return 0;
}

/*
 * DSP MMU operations
 */
static __inline__ unsigned short get_cam_l_va_mask(unsigned short slst)
{
        switch (slst) {
        case DSPMMU_CAM_L_SLST_1MB:
                return DSPMMU_CAM_L_VA_TAG_L1_MASK |
                       DSPMMU_CAM_L_VA_TAG_L2_MASK_1MB;
        case DSPMMU_CAM_L_SLST_64KB:
                return DSPMMU_CAM_L_VA_TAG_L1_MASK |
                       DSPMMU_CAM_L_VA_TAG_L2_MASK_64KB;
        case DSPMMU_CAM_L_SLST_4KB:
                return DSPMMU_CAM_L_VA_TAG_L1_MASK |
                       DSPMMU_CAM_L_VA_TAG_L2_MASK_4KB;
        case DSPMMU_CAM_L_SLST_1KB:
                return DSPMMU_CAM_L_VA_TAG_L1_MASK |
                       DSPMMU_CAM_L_VA_TAG_L2_MASK_1KB;
        }
        return 0;
}

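/*
 * The virtual-address tag is split across the two CAM registers:
 * dsp_mmu_load_tlb() below writes vadr >> 22 into CAM_H and
 * (vadr >> 6) & cam_l_va_mask into CAM_L, so CAM_H holds address
 * bits [31:22] and CAM_L holds bits [21:6], with the per-size mask
 * above deciding how many of the low tag bits are significant for
 * the chosen section size.
 */
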
static __inline__ void get_tlb_lock(int *base, int *victim)
{
        unsigned short lock = omap_readw(DSPMMU_LOCK);

        if (base != NULL)
                *base = (lock & DSPMMU_LOCK_BASE_MASK)
                        >> DSPMMU_LOCK_BASE_SHIFT;
        if (victim != NULL)
                *victim = (lock & DSPMMU_LOCK_VICTIM_MASK)
                          >> DSPMMU_LOCK_VICTIM_SHIFT;
}

static __inline__ void set_tlb_lock(int base, int victim)
{
        omap_writew((base << DSPMMU_LOCK_BASE_SHIFT) |
                    (victim << DSPMMU_LOCK_VICTIM_SHIFT), DSPMMU_LOCK);
}

static __inline__ void __read_tlb(unsigned short lbase, unsigned short victim,
                                  unsigned short *cam_h, unsigned short *cam_l,
                                  unsigned short *ram_h, unsigned short *ram_l)
{
        /* set the victim */
        set_tlb_lock(lbase, victim);

        /* read a TLB entry */
        omap_writew(DSPMMU_LD_TLB_RD, DSPMMU_LD_TLB);

        if (cam_h != NULL)
                *cam_h = omap_readw(DSPMMU_READ_CAM_H);
        if (cam_l != NULL)
                *cam_l = omap_readw(DSPMMU_READ_CAM_L);
        if (ram_h != NULL)
                *ram_h = omap_readw(DSPMMU_READ_RAM_H);
        if (ram_l != NULL)
                *ram_l = omap_readw(DSPMMU_READ_RAM_L);
}

static __inline__ void __load_tlb(unsigned short cam_h, unsigned short cam_l,
                                  unsigned short ram_h, unsigned short ram_l)
{
        omap_writew(cam_h, DSPMMU_CAM_H);
        omap_writew(cam_l, DSPMMU_CAM_L);
        omap_writew(ram_h, DSPMMU_RAM_H);
        omap_writew(ram_l, DSPMMU_RAM_L);

        /* flush the entry */
        dsp_mmu_flush();

        /* load a TLB entry */
        omap_writew(DSPMMU_LD_TLB_LD, DSPMMU_LD_TLB);
}

static int dsp_mmu_load_tlb(unsigned long vadr, unsigned long padr,
                            unsigned short slst, unsigned short prsvd,
                            unsigned short ap)
{
        int lbase, victim;
        unsigned short cam_l_va_mask;

        clk_use(dsp_ck_handle);

        get_tlb_lock(&lbase, NULL);
        for (victim = 0; victim < lbase; victim++) {
                unsigned short cam_l;

                /* read a TLB entry */
                __read_tlb(lbase, victim, NULL, &cam_l, NULL, NULL);
                if (!(cam_l & DSPMMU_CAM_L_V))
                        break;	/* reuse this invalidated line */
        }
        set_tlb_lock(lbase, victim);

        /* The last (31st) entry cannot be locked? */
        if (victim == 31) {
                printk(KERN_ERR "omapdsp: TLB is full.\n");
                clk_unuse(dsp_ck_handle);
                return -EBUSY;
        }

        cam_l_va_mask = get_cam_l_va_mask(slst);
        if (vadr &
            ~(DSPMMU_CAM_H_VA_TAG_H_MASK << 22 |
              (unsigned long)cam_l_va_mask << 6)) {
                printk(KERN_ERR
                       "omapdsp: mapping vadr (0x%06lx) is not "
                       "on an aligned boundary\n", vadr);
                clk_unuse(dsp_ck_handle);
                return -EINVAL;
        }

        __load_tlb(vadr >> 22, (vadr >> 6 & cam_l_va_mask) | prsvd | slst,
                   padr >> 16, (padr & DSPMMU_RAM_L_RAM_LSB_MASK) | ap);

        /* update lock base */
        if (victim == lbase)
                lbase++;
        set_tlb_lock(lbase, lbase);

        clk_unuse(dsp_ck_handle);
        return 0;
}

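/*
 * Locking protocol used above (inferred from the code): entries below
 * the lock base are "locked" and never chosen by the hardware as
 * replacement victims.  A new mapping is loaded either into an
 * invalidated line inside the locked region (reusing it) or at the
 * lock base itself, in which case the base is pushed up by one so the
 * new entry becomes locked too.  Line 31 is left unlocked, apparently
 * so that the DSP always keeps one line for dynamic translations.
 */
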
static int dsp_mmu_clear_tlb(unsigned long vadr)
{
        int lbase;
        int i;
        int max_valid = 0;

        clk_use(dsp_ck_handle);

        get_tlb_lock(&lbase, NULL);
        for (i = 0; i < lbase; i++) {
                unsigned short cam_h, cam_l;
                unsigned short cam_l_va_mask, cam_vld, slst;
                unsigned long cam_va;

                /* read a TLB entry */
                __read_tlb(lbase, i, &cam_h, &cam_l, NULL, NULL);

                cam_vld = cam_l & DSPMMU_CAM_L_V;
                if (!cam_vld)
                        continue;

                slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
                cam_l_va_mask = get_cam_l_va_mask(slst);
                cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (unsigned long)(cam_l & cam_l_va_mask) << 6;

                if (cam_va == vadr)
                        /* flush the entry */
                        dsp_mmu_flush();
                else
                        max_valid = i;
        }

        /* set new lock base */
        set_tlb_lock(max_valid+1, max_valid+1);

        clk_unuse(dsp_ck_handle);
        return 0;
}

static void dsp_mmu_gflush(void)
{
        clk_use(dsp_ck_handle);

        __dsp_mmu_gflush();
        /* only the preserved entry (exmap_tbl[0]) stays locked */
        set_tlb_lock(1, 1);

        clk_unuse(dsp_ck_handle);
}

/*
 * dsp_exmap()
 *
 * OMAP_DSP_MEM_IOCTL_EXMAP ioctl calls this function with padr = 0.
 * In this case, the buffer for the DSP is allocated in this routine
 * and mapped at dspadr.
 * On the other hand, a caller such as the frame buffer sharing code
 * calls this function with padr set. It means some known address
 * space pointed to by padr is going to be shared with the DSP.
 */
static int dsp_exmap(unsigned long dspadr, unsigned long padr,
                     unsigned long size, enum exmap_type type)
{
        unsigned short slst;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;
        unsigned int cntnu = 0;
        unsigned long _dspadr = dspadr;
        unsigned long _padr = padr;
        void *_vadr = dspbyte_to_virt(dspadr);
        unsigned long _size = size;
        struct exmap_tbl *exmap_ent;
        int status;
        int i;

#define MINIMUM_PAGESZ	SZ_4KB
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
                return -EINVAL;
        }
        if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: DSP address(0x%lx) is not aligned.\n", dspadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: physical address(0x%lx) is not aligned.\n",
                       padr);
                return -EINVAL;
        }

        /* address validity check */
        if ((dspadr < dspmem_size) ||
            (dspadr >= DSPSPACE_SIZE) ||
            ((dspadr + size > DSP_INIT_PAGE) &&
             (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
                printk(KERN_ERR
                       "omapdsp: illegal address/size for dsp_exmap().\n");
                return -EINVAL;
        }

        down_write(&exmap_sem);

        /* overlap check */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                unsigned long mapsize;
                struct exmap_tbl *tmp_ent = &exmap_tbl[i];

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        printk(KERN_ERR "omapdsp: exmap page overlap!\n");
                        up_write(&exmap_sem);
                        return -EINVAL;
                }
        }

start:
        buf = NULL;
        /* Are there any free TLB lines? */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                if (!exmap_tbl[i].valid)
                        goto found_free;
        }
        printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = &exmap_tbl[i];

        if ((_size >= SZ_1MB) &&
            (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
            is_aligned(_dspadr, SZ_1MB)) {
                unit = SZ_1MB;
                slst = DSPMMU_CAM_L_SLST_1MB;
                order = ORDER_1MB;
        } else if ((_size >= SZ_64KB) &&
                   (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64KB)) {
                unit = SZ_64KB;
                slst = DSPMMU_CAM_L_SLST_64KB;
                order = ORDER_64KB;
        } else /* if (_size >= SZ_4KB) */ {
                unit = SZ_4KB;
                slst = DSPMMU_CAM_L_SLST_4KB;
                order = ORDER_4KB;
        }
#if 0	/* 1KB is not enabled */
        else if (_size >= SZ_1KB) {
                unit = SZ_1KB;
                slst = DSPMMU_CAM_L_SLST_1KB;
                /* no page order fits a sub-page (1KB) unit */
        }
#endif

        /* buffer allocation */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                buf = (void *)dsp_mem_get_dma_pages(order);
                if (buf == NULL) {
                        status = -ENOMEM;
                        goto fail;
                }
                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);
                for (page = ps; page < pe; page++) {
                        SetPageReserved(page);
                }
                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we should not access the allocated memory through 'buf',
         * since this area should not be cached.
         */
        status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading DSP TLB entry */
        status = dsp_mmu_load_tlb(_dspadr, _padr, slst, 0, DSPMMU_RAM_L_AP_FA);
        if (status < 0) {
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
        }

        exmap_ent->buf      = buf;
        exmap_ent->vadr     = _vadr;
        exmap_ent->order    = order;
        exmap_ent->valid    = 1;
        exmap_ent->cntnu    = cntnu;
        exmap_ent->type     = type;
        exmap_ent->usecount = 0;

        if ((_size -= unit) == 0) {	/* normal completion */
                up_write(&exmap_sem);
                return size;
        }

        _dspadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        cntnu = 1;
        goto start;

fail:
        up_write(&exmap_sem);
        if (buf)
                dsp_mem_free_pages((unsigned long)buf, order);
        dsp_exunmap(dspadr);
        return status;
}

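/*
 * Example of the section splitting above: a 0x180000-byte (1.5MB)
 * request at a 1MB-aligned DSP address is mapped as one 1MB section
 * followed by eight 64KB sections.  The first exmap_tbl entry gets
 * cntnu = 0 and every following entry of the group gets cntnu = 1,
 * which is what lets dsp_exunmap() below walk and release the whole
 * group from the first DSP address alone.
 */
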
static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
        unsigned long size;

        /* clearing ARM MMU */
        size = 1 << (ent->order + PAGE_SHIFT);
        exmap_clear_armmmu((unsigned long)ent->vadr, size);

        /* freeing allocated memory */
        if (ent->type == EXMAP_TYPE_MEM) {
                dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
                printk(KERN_DEBUG
                       "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
                       size, ent->buf);
        }

        return size;
}

static int dsp_exunmap(unsigned long dspadr)
{
        void *vadr;
        unsigned long size;
        int total = 0;
        struct exmap_tbl *ent;
        int idx;

        vadr = dspbyte_to_virt(dspadr);
        down_write(&exmap_sem);
        for (idx = 0; idx < DSPMMU_TLB_LINES; idx++) {
                ent = &exmap_tbl[idx];
                if (!ent->valid)
                        continue;
                if (ent->vadr == vadr)
                        goto found_map;
        }
        up_write(&exmap_sem);
        printk(KERN_WARNING
               "omapdsp: address %06lx not found in exmap_tbl.\n", dspadr);
        return -EINVAL;

found_map:
        if (ent->usecount > 0) {
                printk(KERN_ERR
                       "omapdsp: exmap reference count is not 0.\n"
                       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
                       idx, ent->vadr, ent->order, ent->usecount);
                up_write(&exmap_sem);
                return -EINVAL;
        }
        /* clearing DSP TLB entry */
        dsp_mmu_clear_tlb(dspadr);

        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(ent);
        ent->valid = 0;
        total += size;

        /* we don't free PTEs */
        /* flush TLB */
        flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

        /* check if next mapping is in same group */
        if (++idx == DSPMMU_TLB_LINES)
                goto up_out;	/* normal completion */
        ent = &exmap_tbl[idx];
        if (!ent->valid || !ent->cntnu)
                goto up_out;	/* normal completion */

        dspadr += size;
        vadr   += size;
        if (ent->vadr == vadr)
                goto found_map;	/* continue */

        printk(KERN_ERR
               "omapdsp: illegal exmap_tbl grouping!\n"
               "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
               vadr, idx, ent->vadr);
        up_write(&exmap_sem);
        return -EINVAL;

up_out:
        up_write(&exmap_sem);
        return total;
}

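/*
 * Group walk above: because dsp_exmap() stores split mappings in
 * consecutive exmap_tbl entries with cntnu set on every entry after
 * the first, dsp_exunmap() only needs the first DSP address; it then
 * follows idx+1, idx+2, ... as long as cntnu is set and the shadow
 * addresses line up, releasing the whole group in one call.
 */
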
static void exmap_flush(void)
{
        struct exmap_tbl *ent;
        int i;

        down_write(&exmap_sem);

        /* clearing DSP TLB entries */
        dsp_mmu_gflush();

        /* exmap_tbl[0] should be preserved */
        for (i = 1; i < DSPMMU_TLB_LINES; i++) {
                ent = &exmap_tbl[i];
                if (ent->valid) {
                        unmap_free_arm(ent);
                        ent->valid = 0;
                }
        }

        /* flush TLB */
        flush_tlb_kernel_range(dspmem_base + dspmem_size,
                               dspmem_base + DSPSPACE_SIZE);
        up_write(&exmap_sem);
}

#ifdef CONFIG_OMAP_DSP_FBEXPORT
#ifndef CONFIG_FB
#error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
#endif /* CONFIG_FB */

static int dsp_fbexport(unsigned long *dspadr)
{
        unsigned long dspadr_actual;
        unsigned long padr_sys, padr, fbsz_sys, fbsz;
        int cnt;

        printk(KERN_DEBUG "omapdsp: frame buffer export\n");

        if (num_registered_fb == 0) {
                printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
                return -EINVAL;
        }
        if (num_registered_fb != 1) {
                printk(KERN_INFO
                       "omapdsp: %d frame buffers found. we use the first one.\n",
                       num_registered_fb);
        }
        padr_sys = registered_fb[0]->fix.smem_start;
        fbsz_sys = registered_fb[0]->fix.smem_len;
        if (fbsz_sys == 0) {
                printk(KERN_ERR
                       "omapdsp: framebuffer doesn't seem to be configured "
                       "correctly! (size=0)\n");
                return -EINVAL;
        }

        /*
         * align padr and fbsz to 4kB boundary
         * (should be noted to the user afterwards!)
         */
        padr = padr_sys & ~(SZ_4KB-1);
        fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);

        /* line up dspadr offset with padr */
        dspadr_actual =
                (fbsz > SZ_1MB)  ? lineup_offset(*dspadr, padr, SZ_1MB-1) :
                (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
                /* (fbsz > SZ_4KB) ? */ *dspadr;
        if (dspadr_actual != *dspadr)
                printk(KERN_DEBUG
                       "omapdsp: actual dspadr for FBEXPORT = %08lx\n",
                       dspadr_actual);
        *dspadr = dspadr_actual;

        cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
        if (cnt < 0) {
                printk(KERN_ERR "omapdsp: exmap failure.\n");
                return cnt;
        }

        if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
                printk(KERN_WARNING
"  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
"  !!  screen base address or size is not aligned in 4kB:          !!\n"
"  !!    actual screen  adr = %08lx, size = %08lx              !!\n"
"  !!    exporting      adr = %08lx, size = %08lx              !!\n"
"  !!  Make sure that the framebuffer is allocated with 4kB-order! !!\n"
"  !!  Otherwise DSP can corrupt the kernel memory.                !!\n"
"  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
                       padr_sys, fbsz_sys, padr, fbsz);
        }

        /* increase the DMA priority */
        set_emiff_dma_prio(15);

        return 0;
}

#else /* CONFIG_OMAP_DSP_FBEXPORT */

static int dsp_fbexport(unsigned long *dspadr)
{
        printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
        return -EINVAL;
}

#endif /* CONFIG_OMAP_DSP_FBEXPORT */

static int dsp_mmu_itack(void)
{
        unsigned long dspadr;

        printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
        if (!dsp_err_mmu_isset()) {
                printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
                return -EINVAL;
        }
        dspadr = dsp_fault_adr & ~(SZ_4KB-1);
        dsp_exmap(dspadr, 0, SZ_4KB, EXMAP_TYPE_MEM);	/* FIXME: reserve TLB entry for this */
        printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
        dsp_runlevel(OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY);
        __dsp_mmu_itack();
        udelay(100);
        dsp_exunmap(dspadr);
        dsp_err_mmu_clear();
        return 0;
}

static void dsp_mmu_init(void)
{
        unsigned long phys;
        void *virt;

        clk_use(dsp_ck_handle);
        down_write(&exmap_sem);

        dsp_mmu_disable();	/* clear all */
        udelay(100);
        dsp_mmu_enable();

        /* mapping for ARM MMU */
        phys = __pa(dspvect_page);
        virt = dspbyte_to_virt(DSP_INIT_PAGE);	/* 0xe0fff000 */
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        exmap_tbl[0].buf      = dspvect_page;
        exmap_tbl[0].vadr     = virt;
        exmap_tbl[0].usecount = 0;
        exmap_tbl[0].order    = 0;
        exmap_tbl[0].valid    = 1;
        exmap_tbl[0].cntnu    = 0;

        /* DSP TLB initialization */
        set_tlb_lock(0, 0);
        /* preserved, full access */
        dsp_mmu_load_tlb(DSP_INIT_PAGE, phys, DSPMMU_CAM_L_SLST_4KB,
                         DSPMMU_CAM_L_P, DSPMMU_RAM_L_AP_FA);
        up_write(&exmap_sem);
        clk_unuse(dsp_ck_handle);
}

static void dsp_mmu_shutdown(void)
{
        exmap_flush();
        dsp_mmu_disable();	/* clear all */
}

/*
 * intmem_enable() / disable():
 * if the address is in DSP internal memories,
 * we send PM mailbox commands so that the DSP DMA domain won't go
 * idle while the ARM is accessing those memories.
 */
static int intmem_enable(void)
{
        int ret = 0;

        if (dsp_is_ready())
                ret = dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_ENABLE,
                                 DSPREG_ICR_DMA_IDLE_DOMAIN);

        return ret;
}

static void intmem_disable(void)
{
        if (dsp_is_ready())
                dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_DISABLE,
                           DSPREG_ICR_DMA_IDLE_DOMAIN);
}

/*
 * dsp_mem_enable() / disable()
 */
int intmem_usecount;

int dsp_mem_enable(void *adr)
{
        int ret = 0;

        if (is_dsp_internal_mem(adr)) {
                if (intmem_usecount++ == 0)
                        ret = omap_dsp_request_mem();
        } else
                down_read(&exmap_sem);

        return ret;
}

void dsp_mem_disable(void *adr)
{
        if (is_dsp_internal_mem(adr)) {
                if (--intmem_usecount == 0)
                        omap_dsp_release_mem();
        } else
                up_read(&exmap_sem);
}

void dsp_mem_usecount_clear(void)
{
        if (intmem_usecount != 0) {
                printk(KERN_WARNING
                       "omapdsp: unbalanced memory request/release detected.\n"
                       "         intmem_usecount is not zero where it "
                       "should be! ... fixed to be zero.\n");
                intmem_usecount = 0;
                omap_dsp_release_mem();
        }
}

/*
 * dsp_mem file operations
 */
static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        down(&file->f_dentry->d_inode->i_sem);
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                break;
        default:
                ret = -EINVAL;
        }
        up(&file->f_dentry->d_inode->i_sem);
        return ret;
}

static ssize_t intmem_read(struct file *file, char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = dspbyte_to_virt(p);
        ssize_t size = dspmem_size;
        ssize_t read;

        if (p >= size)
                return 0;
        clk_use(api_ck_handle);
        read = count;
        if (count > size - p)
                read = size - p;
        if (copy_to_user(buf, vadr, read)) {
                read = -EFAULT;
                goto out;
        }
        *ppos += read;
out:
        clk_unuse(api_ck_handle);
        return read;
}

static ssize_t exmem_read(struct file *file, char *buf, size_t count,
                          loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = dspbyte_to_virt(p);

        if (!exmap_valid(vadr, count)) {
                printk(KERN_ERR
                       "omapdsp: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > DSPSPACE_SIZE - p)
                count = DSPSPACE_SIZE - p;
        if (copy_to_user(buf, vadr, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t dsp_mem_read(struct file *file, char *buf, size_t count,
                            loff_t *ppos)
{
        int ret;
        void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);

        if (dsp_mem_enable(vadr) < 0)
                return -EBUSY;
        if (is_dspbyte_internal_mem(*ppos))
                ret = intmem_read(file, buf, count, ppos);
        else
                ret = exmem_read(file, buf, count, ppos);
        dsp_mem_disable(vadr);

        return ret;
}

static ssize_t intmem_write(struct file *file, const char *buf, size_t count,
                            loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = dspbyte_to_virt(p);
        ssize_t size = dspmem_size;
        ssize_t written;

        if (p >= size)
                return 0;
        clk_use(api_ck_handle);
        written = count;
        if (count > size - p)
                written = size - p;
        if (copy_from_user(vadr, buf, written)) {
                written = -EFAULT;
                goto out;
        }
        *ppos += written;
out:
        clk_unuse(api_ck_handle);
        return written;
}

static ssize_t exmem_write(struct file *file, const char *buf, size_t count,
                           loff_t *ppos)
{
        unsigned long p = *ppos;
        void *vadr = dspbyte_to_virt(p);

        if (!exmap_valid(vadr, count)) {
                printk(KERN_ERR
                       "omapdsp: DSP address %08lx / size %08x "
                       "is not valid!\n", p, count);
                return -EFAULT;
        }
        if (count > DSPSPACE_SIZE - p)
                count = DSPSPACE_SIZE - p;
        if (copy_from_user(vadr, buf, count))
                return -EFAULT;
        *ppos += count;

        return count;
}

static ssize_t dsp_mem_write(struct file *file, const char *buf, size_t count,
                             loff_t *ppos)
{
        int ret;
        void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);

        if (dsp_mem_enable(vadr) < 0)
                return -EBUSY;
        if (is_dspbyte_internal_mem(*ppos))
                ret = intmem_write(file, buf, count, ppos);
        else
                ret = exmem_write(file, buf, count, ppos);
        dsp_mem_disable(vadr);

        return ret;
}

static int dsp_mem_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case OMAP_DSP_MEM_IOCTL_MMUINIT:
                dsp_mmu_init();
                return 0;

        case OMAP_DSP_MEM_IOCTL_EXMAP:
        {
                struct omap_dsp_mapinfo mapinfo;
                if (copy_from_user(&mapinfo, (void *)arg,
                                   sizeof(mapinfo)))
                        return -EFAULT;
                return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
                                 EXMAP_TYPE_MEM);
        }

        case OMAP_DSP_MEM_IOCTL_EXUNMAP:
                return dsp_exunmap((unsigned long)arg);

        case OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH:
                exmap_flush();
                return 0;

        case OMAP_DSP_MEM_IOCTL_FBEXPORT:
        {
                unsigned long dspadr;
                int ret;
                if (copy_from_user(&dspadr, (void *)arg, sizeof(long)))
                        return -EFAULT;
                ret = dsp_fbexport(&dspadr);
                if (copy_to_user((void *)arg, &dspadr, sizeof(long)))
                        return -EFAULT;
                return ret;
        }

        case OMAP_DSP_MEM_IOCTL_MMUITACK:
                return dsp_mmu_itack();

        case OMAP_DSP_MEM_IOCTL_KMEM_RESERVE:
        {
                unsigned long size;
                if (copy_from_user(&size, (void *)arg, sizeof(long)))
                        return -EFAULT;
                return dsp_kmem_reserve(size);
        }

        case OMAP_DSP_MEM_IOCTL_KMEM_RELEASE:
                dsp_kmem_release();
                return 0;

        default:
                return -ENOIOCTLCMD;
        }
}

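/*
 * Usage sketch from user space (illustrative only -- the device node
 * path is an assumption, not taken from this file):
 *
 *	struct omap_dsp_mapinfo mi = { .dspadr = 0x300000,
 *				       .size   = 0x10000 };
 *	int fd = open("/dev/omap-dsp/mem", O_RDWR);
 *	ioctl(fd, OMAP_DSP_MEM_IOCTL_EXMAP, &mi);
 *	...
 *	ioctl(fd, OMAP_DSP_MEM_IOCTL_EXUNMAP, mi.dspadr);
 *
 * Note that EXUNMAP takes the DSP address by value, not by pointer.
 */
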
static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* FIXME: not implemented */
        return -ENOSYS;
}

static int dsp_mem_open(struct inode *inode, struct file *file)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        return 0;
}

static int dsp_mem_release(struct inode *inode, struct file *file)
{
        return 0;
}

/*
 * sysfs files
 */
static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        int len;
        int lbase, victim;
        int i;

        clk_use(dsp_ck_handle);
        down_read(&exmap_sem);

        get_tlb_lock(&lbase, &victim);

        len = sprintf(buf, "p: preserved, v: valid\n"
                           "ety cam_va ram_pa sz ap\n");
        /* 00: p v 0x300000 0x10171800 64KB FA */
        for (i = 0; i < 32; i++) {
                unsigned short cam_h, cam_l, ram_h, ram_l;
                unsigned short cam_l_va_mask, prsvd, cam_vld, slst;
                unsigned long cam_va;
                unsigned short ram_l_ap;
                unsigned long ram_pa;
                char *pgsz_str, *ap_str;

                /* read a TLB entry */
                __read_tlb(lbase, i, &cam_h, &cam_l, &ram_h, &ram_l);

                slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
                cam_l_va_mask = get_cam_l_va_mask(slst);
                pgsz_str = (slst == DSPMMU_CAM_L_SLST_1MB)  ? " 1MB":
                           (slst == DSPMMU_CAM_L_SLST_64KB) ? "64KB":
                           (slst == DSPMMU_CAM_L_SLST_4KB)  ? " 4KB":
                                                              " 1KB";
                prsvd    = cam_l & DSPMMU_CAM_L_P;
                cam_vld  = cam_l & DSPMMU_CAM_L_V;
                ram_l_ap = ram_l & DSPMMU_RAM_L_AP_MASK;
                ap_str = (ram_l_ap == DSPMMU_RAM_L_AP_RO) ? "RO":
                         (ram_l_ap == DSPMMU_RAM_L_AP_FA) ? "FA":
                                                            "NA";
                cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (unsigned long)(cam_l & cam_l_va_mask) << 6;
                ram_pa = (unsigned long)ram_h << 16 |
                         (ram_l & DSPMMU_RAM_L_RAM_LSB_MASK);

                if (i == lbase)
                        len += sprintf(buf + len, "lock base = %d\n", lbase);
                if (i == victim)
                        len += sprintf(buf + len, "victim    = %d\n", victim);
                /* 00: p v 0x300000 0x10171800 64KB FA */
                len += sprintf(buf + len,
                               "%02d: %c %c 0x%06lx 0x%08lx %s %s\n",
                               i,
                               prsvd   ? 'p' : ' ',
                               cam_vld ? 'v' : ' ',
                               cam_va, ram_pa, pgsz_str, ap_str);
        }

        /* restore victim entry */
        set_tlb_lock(lbase, victim);

        up_read(&exmap_sem);
        clk_unuse(dsp_ck_handle);
        return len;
}

static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);

static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        int len;
        int i;

        down_read(&exmap_sem);
        len = sprintf(buf, "v: valid, c: cntnu\n"
                           "ety vadr buf od uc\n");
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                struct exmap_tbl *ent = &exmap_tbl[i];
                /* 00: v c 0xe0300000 0xc0171800  0  0 */
                len += sprintf(buf + len, "%02d: %c %c 0x%8p 0x%8p %2d %2d\n",
                               i,
                               ent->valid ? 'v' : ' ',
                               ent->cntnu ? 'c' : ' ',
                               ent->vadr, ent->buf, ent->order, ent->usecount);
        }
        up_read(&exmap_sem);
        return len;
}

static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);

static ssize_t kmem_pool_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        int count_1M, count_64K, total;

        count_1M = kmem_pool_1M.count;
        count_64K = kmem_pool_64K.count;
        total = count_1M * SZ_1MB + count_64K * SZ_64KB;

        return sprintf(buf, "0x%x %d %d\n", total, count_1M, count_64K);
}

static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);

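/*
 * Reading this attribute (e.g. "cat .../kmem_pool" under the dsp
 * platform device in sysfs) prints the reserved total in bytes
 * followed by the 1MB and 64KB block counts; with one 1MB block and
 * two 64KB blocks reserved it prints "0x120000 1 2".
 */
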
/*
 * DSP MMU interrupt handler
 */

/*
 * MMU fault mask:
 * we ignore the prefetch err.
 */
#define MMUFAULT_MASK \
	(DSPMMU_FAULT_ST_PERM |\
	 DSPMMU_FAULT_ST_TLB_MISS |\
	 DSPMMU_FAULT_ST_TRANS)

irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        unsigned short status;
        unsigned short adh, adl;
        unsigned short dp;

        status = omap_readw(DSPMMU_FAULT_ST);
        adh = omap_readw(DSPMMU_FAULT_AD_H);
        adl = omap_readw(DSPMMU_FAULT_AD_L);
        dp = adh & DSPMMU_FAULT_AD_H_DP;
        dsp_fault_adr = MKLONG(adh & DSPMMU_FAULT_AD_H_ADR_MASK, adl);
        /* if the fault is masked, nothing to do */
        if ((status & MMUFAULT_MASK) == 0) {
                printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
                /*
                 * note: in OMAP1710,
                 * when the CACHE + DMA domain gets out of idle in the DSP,
                 * an MMU interrupt occurs but DSPMMU_FAULT_ST is not set.
                 * in this case, we just ignore the interrupt.
                 */
                printk(KERN_DEBUG "%s%s%s%s\n",
                       (status & DSPMMU_FAULT_ST_PREF)?
                                " (prefetch err)" : "",
                       (status & DSPMMU_FAULT_ST_PERM)?
                                " (permission fault)" : "",
                       (status & DSPMMU_FAULT_ST_TLB_MISS)?
                                " (TLB miss)" : "",
                       (status & DSPMMU_FAULT_ST_TRANS) ?
                                " (translation fault)": "");
                printk(KERN_DEBUG
                       "fault address = %s: 0x%06lx\n",
                       dp ? "DATA" : "PROGRAM",
                       dsp_fault_adr);
                return IRQ_HANDLED;
        }

        printk(KERN_INFO "DSP MMU interrupt!\n");
        printk(KERN_INFO "%s%s%s%s\n",
               (status & DSPMMU_FAULT_ST_PREF)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_PREF)?
                                " prefetch err":
                                " (prefetch err)":
                        "",
               (status & DSPMMU_FAULT_ST_PERM)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_PERM)?
                                " permission fault":
                                " (permission fault)":
                        "",
               (status & DSPMMU_FAULT_ST_TLB_MISS)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_TLB_MISS)?
                                " TLB miss":
                                " (TLB miss)":
                        "",
               (status & DSPMMU_FAULT_ST_TRANS)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_TRANS)?
                                " translation fault":
                                " (translation fault)":
                        "");
        printk(KERN_INFO "fault address = %s: 0x%06lx\n",
               dp ? "DATA" : "PROGRAM",
               dsp_fault_adr);

        if (dsp_is_ready()) {
                /*
                 * If we call dsp_exmap() here,
                 * "kernel BUG at slab.c" occurs.
                 */
                dsp_err_mmu_set(dsp_fault_adr);
        } else {
                disable_irq(INT_DSP_MMU);
                __dsp_mmu_itack();
                printk(KERN_INFO "Resetting DSP...\n");
                dsp_cpustat_request(CPUSTAT_RESET);
                enable_irq(INT_DSP_MMU);
                /*
                 * if we enable the following, the semaphore lock must be
                 * avoided.
                 */
#if 0
                printk(KERN_INFO "Flushing DSP MMU...\n");
                exmap_flush();
                dsp_mmu_init();
#endif
        }

        return IRQ_HANDLED;
}

struct file_operations dsp_mem_fops = {
        .owner   = THIS_MODULE,
        .llseek  = dsp_mem_lseek,
        .read    = dsp_mem_read,
        .write   = dsp_mem_write,
        .ioctl   = dsp_mem_ioctl,
        .mmap    = dsp_mem_mmap,
        .open    = dsp_mem_open,
        .release = dsp_mem_release,
};

void dsp_mem_start(void)
{
        dsp_register_mem_cb(intmem_enable, intmem_disable);
}

void dsp_mem_stop(void)
{
        memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
        dsp_unregister_mem_cb();
}

int __init dsp_mem_init(void)
{
        int i;

        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                exmap_tbl[i].valid = 0;
        }

        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                printk(KERN_ERR
                       "omapdsp: failed to allocate memory "
                       "for dsp vector table\n");
                return -ENOMEM;
        }

        dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);

        device_create_file(&dsp_device.dev, &dev_attr_mmu);
        device_create_file(&dsp_device.dev, &dev_attr_exmap);
        device_create_file(&dsp_device.dev, &dev_attr_kmem_pool);

        return 0;
}

void dsp_mem_exit(void)
{
        dsp_mmu_shutdown();
        dsp_kmem_release();

        if (dspvect_page != NULL) {
                unsigned long virt;

                down_read(&exmap_sem);

                virt = (unsigned long)dspbyte_to_virt(DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&exmap_sem);
        }

        device_remove_file(&dsp_device.dev, &dev_attr_mmu);
        device_remove_file(&dsp_device.dev, &dev_attr_exmap);
        device_remove_file(&dsp_device.dev, &dev_attr_kmem_pool);
}