2 * linux/arch/arm/mach-omap2/mmu.c
4 * Support for non-MPU OMAP2 MMUs.
6 * Copyright (C) 2002-2007 Nokia Corporation
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9 * and Paul Mundt <paul.mundt@nokia.com>
11 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 #include <linux/types.h>
28 #include <linux/init.h>
29 #include <linux/rwsem.h>
30 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/err.h>
35 #include <asm/arch/mmu.h>
36 #include <asm/tlbflush.h>
38 #include <asm/sizes.h>
/* Backing page for the DSP exception-vector area; allocated in
 * omap2_mmu_startup() and freed in omap2_mmu_shutdown(). */
static void *dspvect_page;
/* DSP-side byte address at which the vector page is kept mapped. */
#define DSP_INIT_PAGE 0xfff000
44 omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
46 cr->cam = omap_mmu_read_reg(mmu, MMU_READ_CAM);
47 cr->ram = omap_mmu_read_reg(mmu, MMU_READ_RAM);
51 omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
53 /* Set the CAM and RAM entries */
54 omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, MMU_CAM);
55 omap_mmu_write_reg(mmu, cr->ram, MMU_RAM);
58 static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
59 unsigned long dsp_io_adr, int index)
63 struct omap_mmu_tlb_entry tlb_ent;
65 dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
66 virt = omap_mmu_to_virt(mmu, dspadr);
67 exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
68 INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
69 INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
70 omap_mmu_load_pte_entry(mmu, &tlb_ent);
73 static void exmap_clear_iomap_page(struct omap_mmu *mmu,
74 unsigned long dsp_io_adr)
79 dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
80 virt = omap_mmu_to_virt(mmu, dspadr);
81 exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
82 /* DSP MMU is shutting down. not handled here. */
/* Physical bases of the L4 peripherals exposed to the DSP through the
 * preserved I/O-map entries below. */
#define OMAP24XX_MAILBOX_BASE (L4_24XX_BASE + 0x94000)
#define OMAP2420_GPT5_BASE (L4_24XX_BASE + 0x7c000)
#define OMAP2420_GPT6_BASE (L4_24XX_BASE + 0x7e000)
#define OMAP2420_GPT7_BASE (L4_24XX_BASE + 0x80000)
#define OMAP2420_GPT8_BASE (L4_24XX_BASE + 0x82000)
#define OMAP24XX_EAC_BASE (L4_24XX_BASE + 0x90000)
#define OMAP24XX_STI_BASE (L4_24XX_BASE + 0x68000)
#define OMAP24XX_STI_CH_BASE (L4_24XX_BASE + 0x0c000000)
94 static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
98 exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
100 /* REVISIT: This will need to be revisited for 3430 */
101 exmap_setup_iomap_page(mmu, OMAP2_PRCM_BASE, 0x7000, n++);
102 exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);
104 if (cpu_is_omap2420()) {
105 exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
106 exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
107 exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
108 exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
109 exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE, 0x10000, n++);
110 exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
111 for (i = 0; i < 5; i++)
112 exmap_setup_preserved_mem_page(mmu,
113 __va(OMAP24XX_STI_CH_BASE + i*SZ_4K),
114 0xfb0000 + i*SZ_4K, n++);
120 static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
124 exmap_clear_iomap_page(mmu, 0x7000); /* PRCM registers */
125 exmap_clear_iomap_page(mmu, 0x11000); /* MAILBOX registers */
127 if (cpu_is_omap2420()) {
128 exmap_clear_iomap_page(mmu, 0xe000); /* GPT5 */
129 exmap_clear_iomap_page(mmu, 0xe800); /* GPT6 */
130 exmap_clear_iomap_page(mmu, 0xf000); /* GPT7 */
131 exmap_clear_iomap_page(mmu, 0xf800); /* GPT8 */
132 exmap_clear_iomap_page(mmu, 0x10000); /* EAC */
133 exmap_clear_iomap_page(mmu, 0xc800); /* STI */
134 for (i = 0; i < 5; i++) /* STI CH */
135 exmap_clear_mem_page(mmu, 0xfb0000 + i*SZ_4K);
138 exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
/* Fault interrupts we enable and service; note TLBMISS is intentionally
 * excluded (it is still reported in omap2_mmu_interrupt() diagnostics but
 * masked out of the status word handled there). */
#define MMU_IRQ_MASK \
	(OMAP_MMU_IRQ_MULTIHITFAULT | \
	 OMAP_MMU_IRQ_TABLEWALKFAULT | \
	 OMAP_MMU_IRQ_EMUMISS | \
	 OMAP_MMU_IRQ_TRANSLATIONFAULT)
147 static int omap2_mmu_startup(struct omap_mmu *mmu)
149 dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
150 if (dspvect_page == NULL) {
151 printk(KERN_ERR "MMU: failed to allocate memory "
152 "for dsp vector table\n");
156 mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
158 omap_mmu_write_reg(mmu, MMU_IRQ_MASK, MMU_IRQENABLE);
163 static void omap2_mmu_shutdown(struct omap_mmu *mmu)
165 exmap_clear_preserved_entries(mmu);
167 if (dspvect_page != NULL) {
170 down_read(&mmu->exmap_sem);
172 virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
173 flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
174 free_page((unsigned long)dspvect_page);
177 up_read(&mmu->exmap_sem);
181 static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
182 struct omap_mmu_tlb_lock *tlb_lock)
186 len = sprintf(buf, "P: preserved, V: valid\n"
187 "B: big endian, L:little endian, "
188 "M: mixed page attribute\n"
189 "ety P V size cam_va ram_pa E ES M\n");
190 /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
192 for (i = 0; i < mmu->nr_tlb_entries; i++) {
193 struct omap_mmu_tlb_entry ent;
194 struct cam_ram_regset cr;
195 struct omap_mmu_tlb_lock entry_lock;
196 char *pgsz_str, *elsz_str;
198 /* read a TLB entry */
199 entry_lock.base = tlb_lock->base;
200 entry_lock.victim = i;
201 omap_mmu_read_tlb(mmu, &entry_lock, &cr);
203 ent.pgsz = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
204 ent.prsvd = cr.cam & OMAP_MMU_CAM_P;
205 ent.valid = cr.cam & OMAP_MMU_CAM_V;
206 ent.va = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
207 ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
208 ent.elsz = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
209 ent.pa = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
210 ent.mixed = cr.ram & OMAP_MMU_RAM_MIXED;
212 pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "64MB":
213 (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
214 (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
215 (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
217 elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8) ? " 8":
218 (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16":
219 (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32":
222 if (i == tlb_lock->base)
223 len += sprintf(buf + len, "lock base = %d\n",
225 if (i == tlb_lock->victim)
226 len += sprintf(buf + len, "victim = %d\n",
229 len += sprintf(buf + len,
230 /* 00: P V 4KB 0x300000 0x10171800 B 16 M */
231 "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
233 ent.prsvd ? 'P' : ' ',
234 ent.valid ? 'V' : ' ',
235 pgsz_str, ent.va, ent.pa,
236 ent.endian ? 'B' : 'L',
238 ent.mixed ? 'M' : ' ');
/* Virtual-address tag mask for a CAM page-size field: larger pages keep
 * fewer tag bits; any unrecognized size yields an all-zero mask. */
#define get_cam_va_mask(pgsz) \
	(((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
	 ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB) ? 0xfff00000 : \
	 ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
	 ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB) ? 0xfffff000 : 0)
250 static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
252 unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
253 unsigned int mask = get_cam_va_mask(cr->cam & page_size);
255 return cr->cam & mask;
258 static struct cam_ram_regset *
259 omap2_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
261 struct cam_ram_regset *cr;
263 if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
264 printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
265 "aligned boundary\n", entry->va);
266 return ERR_PTR(-EINVAL);
269 cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
271 cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
272 entry->prsvd | entry->pgsz;
273 cr->ram = entry->pa | entry->endian | entry->elsz;
278 static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
280 return cr->cam & OMAP_MMU_CAM_V;
283 static void omap2_mmu_interrupt(struct omap_mmu *mmu)
285 unsigned long status, va;
287 status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, MMU_IRQSTATUS);
288 va = omap_mmu_read_reg(mmu, MMU_FAULT_AD);
290 pr_info("%s\n", (status & OMAP_MMU_IRQ_MULTIHITFAULT) ? "multi hit":"");
291 pr_info("%s\n", (status & OMAP_MMU_IRQ_TABLEWALKFAULT) ? "table walk fault":"");
292 pr_info("%s\n", (status & OMAP_MMU_IRQ_EMUMISS) ? "EMU miss":"");
293 pr_info("%s\n", (status & OMAP_MMU_IRQ_TRANSLATIONFAULT) ? "translation fault":"");
294 pr_info("%s\n", (status & OMAP_MMU_IRQ_TLBMISS) ? "TLB miss":"");
295 pr_info("fault address = %#08lx\n", va);
297 omap_mmu_disable(mmu);
298 omap_mmu_write_reg(mmu, status, MMU_IRQSTATUS);
300 mmu->fault_address = va;
301 schedule_work(&mmu->irq_work);
304 static pgprot_t omap2_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
308 attr = entry->mixed << 5;
309 attr |= entry->endian;
310 attr |= entry->elsz >> 3;
311 attr <<= ((entry->pgsz & OMAP_MMU_CAM_PAGESIZE_4KB) ? 0:6);
316 struct omap_mmu_ops omap2_mmu_ops = {
317 .startup = omap2_mmu_startup,
318 .shutdown = omap2_mmu_shutdown,
319 .read_tlb = omap2_mmu_read_tlb,
320 .load_tlb = omap2_mmu_load_tlb,
321 .show = omap2_mmu_show,
322 .cam_va = omap2_mmu_cam_va,
323 .cam_ram_alloc = omap2_mmu_cam_ram_alloc,
324 .cam_ram_valid = omap2_mmu_cam_ram_valid,
325 .interrupt = omap2_mmu_interrupt,
326 .pte_get_attr = omap2_mmu_pte_get_attr,
328 EXPORT_SYMBOL_GPL(omap2_mmu_ops);
330 MODULE_LICENSE("GPL");