/*
 * Extracted from the linux-2.6-omap-h63xx.git tree:
 * arch/arm/mach-omap2/mmu.c (web-viewer residue removed).
 */
1 /*
2  * linux/arch/arm/mach-omap2/mmu.c
3  *
4  * Support for non-MPU OMAP2 MMUs.
5  *
6  * Copyright (C) 2002-2007 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *        and Paul Mundt <paul.mundt@nokia.com>
10  *
11  * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 #include <linux/types.h>
18 #include <linux/init.h>
19 #include <linux/rwsem.h>
20 #include <linux/device.h>
21 #include <linux/mm.h>
22 #include <linux/interrupt.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include "mmu.h"
26 #include <mach/mmu.h>
27 #include <asm/tlbflush.h>
28 #include <asm/sizes.h>
29
/* Page backing the DSP vector table; allocated in omap2_mmu_startup(). */
static void *dspvect_page;
/* DSP-side address the vector table page is mapped at (see
 * exmap_setup_preserved_entries()). */
#define DSP_INIT_PAGE   0xfff000
32
/*
 * Read back the CAM/RAM register pair of the currently selected TLB
 * entry into @cr.  The caller must already have programmed the victim
 * index (done by the generic omap_mmu_read_tlb() wrapper).
 */
static inline void
omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        cr->cam = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM);
        cr->ram = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM);
}
39
/*
 * Load @cr into the currently selected TLB victim slot.  The valid
 * bit is forced on in the CAM word so the entry becomes live.
 */
static inline void
omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, OMAP_MMU_CAM);
        omap_mmu_write_reg(mmu, cr->ram, OMAP_MMU_RAM);
}
47
/*
 * Map one 4KB peripheral page into the DSP address space as a
 * preserved entry.
 *
 * @phys:       physical address of the peripheral page
 * @dsp_io_adr: DSP I/O address — presumably in 16-bit word units,
 *              since it is shifted left by one to form the byte
 *              offset inside the IOMAP_VAL segment (TODO confirm)
 * @index:      slot in mmu->exmap_tbl recording this mapping
 *
 * The ARM-side alias is mapped first so the kernel can reach the
 * page, then a preserved 4KB / 32-bit element-size PTE is loaded for
 * the DSP MMU.
 */
static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
                                   unsigned long dsp_io_adr, int index)
{
        unsigned long dspadr;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_set_armmmu(mmu, (unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
        INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
        omap_mmu_load_pte_entry(mmu, &tlb_ent);
}
62
63 static void exmap_clear_iomap_page(struct omap_mmu *mmu,
64                                    unsigned long dsp_io_adr)
65 {
66         unsigned long dspadr;
67         void *virt;
68
69         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
70         virt = omap_mmu_to_virt(mmu, dspadr);
71         exmap_clear_armmmu(mmu, (unsigned long)virt, PAGE_SIZE);
72         /* DSP MMU is shutting down. not handled here. */
73 }
74
/* Physical base addresses of the L4 peripherals exposed to the DSP in
 * exmap_setup_preserved_entries() below. */
#define OMAP24XX_MAILBOX_BASE   (L4_24XX_BASE + 0x94000)
#define OMAP2420_GPT5_BASE      (L4_24XX_BASE + 0x7c000)
#define OMAP2420_GPT6_BASE      (L4_24XX_BASE + 0x7e000)
#define OMAP2420_GPT7_BASE      (L4_24XX_BASE + 0x80000)
#define OMAP2420_GPT8_BASE      (L4_24XX_BASE + 0x82000)
#define OMAP24XX_EAC_BASE       (L4_24XX_BASE + 0x90000)
#define OMAP24XX_STI_BASE       (L4_24XX_BASE + 0x68000)
#define OMAP24XX_STI_CH_BASE    (L4_24XX_BASE + 0x0c000000)
83
84 static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
85 {
86         int i, n = 0;
87
88         exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
89
90         /* REVISIT: This will need to be revisited for 3430 */
91         exmap_setup_iomap_page(mmu, OMAP2_PRCM_BASE, 0x7000, n++);
92         exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);
93
94         if (cpu_is_omap2420()) {
95                 exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
96                 exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
97                 exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
98                 exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
99                 exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE,  0x10000, n++);
100                 exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
101                 for (i = 0; i < 5; i++)
102                         exmap_setup_preserved_mem_page(mmu,
103                                 __va(OMAP24XX_STI_CH_BASE + i*SZ_4K),
104                                 0xfb0000 + i*SZ_4K, n++);
105         }
106
107         return n;
108 }
109
/*
 * Undo exmap_setup_preserved_entries(): unmap the peripheral I/O
 * pages, the STI channel pages (2420 only) and finally the DSP
 * vector table page.
 */
static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
{
        int i;

        exmap_clear_iomap_page(mmu, 0x7000);    /* PRCM registers */
        exmap_clear_iomap_page(mmu, 0x11000);   /* MAILBOX registers */

        if (cpu_is_omap2420()) {
                exmap_clear_iomap_page(mmu, 0xe000);    /* GPT5 */
                exmap_clear_iomap_page(mmu, 0xe800);    /* GPT6 */
                exmap_clear_iomap_page(mmu, 0xf000);    /* GPT7 */
                exmap_clear_iomap_page(mmu, 0xf800);    /* GPT8 */
                exmap_clear_iomap_page(mmu, 0x10000);   /* EAC */
                exmap_clear_iomap_page(mmu, 0xc800);    /* STI */
                for (i = 0; i < 5; i++)                 /* STI CH */
                        exmap_clear_mem_page(mmu, 0xfb0000 + i*SZ_4K);
        }

        exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
}
130
/* Fault conditions enabled at startup and reported in
 * omap2_mmu_interrupt().  Note OMAP_MMU_IRQ_TLBMISS is not included. */
#define MMU_IRQ_MASK \
        (OMAP_MMU_IRQ_MULTIHITFAULT | \
         OMAP_MMU_IRQ_TABLEWALKFAULT | \
         OMAP_MMU_IRQ_EMUMISS | \
         OMAP_MMU_IRQ_TRANSLATIONFAULT)
136
137 static int omap2_mmu_startup(struct omap_mmu *mmu)
138 {
139         u32 rev = omap_mmu_read_reg(mmu, OMAP_MMU_REVISION);
140
141         pr_info("MMU: OMAP %s MMU initialized (HW v%d.%d)\n", mmu->name,
142                 (rev >> 4) & 0xf, rev & 0xf);
143
144         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
145         if (dspvect_page == NULL) {
146                 dev_err(mmu->dev, "MMU %s: failed to allocate memory "
147                         "for vector table\n", mmu->name);
148                 return -ENOMEM;
149         }
150
151         mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
152
153         omap_mmu_write_reg(mmu, MMU_IRQ_MASK, OMAP_MMU_IRQENABLE);
154
155         return 0;
156 }
157
/*
 * Shut down the OMAP2 MMU: remove the preserved exmap entries and
 * release the DSP vector table page allocated in omap2_mmu_startup().
 *
 * NOTE(review): dspvect_page is freed and cleared while holding
 * exmap_sem only for *read*; a write lock would look more natural for
 * a mutation — confirm against the locking rules of exmap_sem in the
 * generic omap-mmu code before changing.
 */
static void omap2_mmu_shutdown(struct omap_mmu *mmu)
{
        exmap_clear_preserved_entries(mmu);

        if (dspvect_page != NULL) {
                unsigned long virt;

                down_read(&mmu->exmap_sem);

                /* Drop any stale kernel TLB entries for the alias
                 * before releasing the backing page. */
                virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&mmu->exmap_sem);
        }
}
175
176 static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
177                               struct omap_mmu_tlb_lock *tlb_lock)
178 {
179         int i, len;
180
181         len = sprintf(buf, "P: preserved, V: valid\n"
182                            "B: big endian, L:little endian, "
183                            "M: mixed page attribute\n"
184                            "ety P V size   cam_va     ram_pa E ES M\n");
185                          /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
186
187         for (i = 0; i < mmu->nr_tlb_entries; i++) {
188                 struct omap_mmu_tlb_entry ent;
189                 struct cam_ram_regset cr;
190                 struct omap_mmu_tlb_lock entry_lock;
191                 char *pgsz_str, *elsz_str;
192
193                 /* read a TLB entry */
194                 entry_lock.base   = tlb_lock->base;
195                 entry_lock.victim = i;
196                 omap_mmu_read_tlb(mmu, &entry_lock, &cr);
197
198                 ent.pgsz   = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
199                 ent.prsvd  = cr.cam & OMAP_MMU_CAM_P;
200                 ent.valid  = cr.cam & OMAP_MMU_CAM_V;
201                 ent.va     = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
202                 ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
203                 ent.elsz   = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
204                 ent.pa     = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
205                 ent.mixed  = cr.ram & OMAP_MMU_RAM_MIXED;
206
207                 pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "64MB":
208                            (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
209                            (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
210                            (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
211                                                                      " ???";
212                 elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
213                            (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16":
214                            (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32":
215                                                                       "??";
216
217                 if (i == tlb_lock->base)
218                         len += sprintf(buf + len, "lock base = %d\n",
219                                        tlb_lock->base);
220                 if (i == tlb_lock->victim)
221                         len += sprintf(buf + len, "victim    = %d\n",
222                                        tlb_lock->victim);
223
224                 len += sprintf(buf + len,
225                                /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
226                                "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
227                                i,
228                                ent.prsvd ? 'P' : ' ',
229                                ent.valid ? 'V' : ' ',
230                                pgsz_str, ent.va, ent.pa,
231                                ent.endian ? 'B' : 'L',
232                                elsz_str,
233                                ent.mixed ? 'M' : ' ');
234         }
235
236         return len;
237 }
238
/*
 * Map a CAM page-size encoding to the mask of virtual-address bits
 * significant for that page size; evaluates to 0 for an unknown
 * encoding.
 */
#define get_cam_va_mask(pgsz) \
        (((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)
244
245 static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
246 {
247         unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
248         unsigned int mask = get_cam_va_mask(cr->cam & page_size);
249
250         return cr->cam & mask;
251 }
252
253 static struct cam_ram_regset *
254 omap2_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
255 {
256         struct cam_ram_regset *cr;
257
258         if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
259                 dev_err(mmu->dev, "MMU %s: mapping vadr (0x%06lx) is not on"
260                         " an aligned boundary\n", mmu->name, entry->va);
261                 return ERR_PTR(-EINVAL);
262         }
263
264         cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
265
266         cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
267                   entry->prsvd | entry->pgsz;
268         cr->ram = entry->pa | entry->endian | entry->elsz;
269
270         return cr;
271 }
272
/* Nonzero iff the CAM word has its valid bit set. */
static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
        return cr->cam & OMAP_MMU_CAM_V;
}
277
278 static void omap2_mmu_interrupt(struct omap_mmu *mmu)
279 {
280         unsigned long status, va;
281
282         status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, OMAP_MMU_IRQSTATUS);
283         va = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD);
284
285         pr_info("%s\n", (status & OMAP_MMU_IRQ_MULTIHITFAULT)?
286                 "multi hit":"");
287         pr_info("%s\n", (status & OMAP_MMU_IRQ_TABLEWALKFAULT)?
288                 "table walk fault":"");
289         pr_info("%s\n", (status & OMAP_MMU_IRQ_EMUMISS)?
290                 "EMU miss":"");
291         pr_info("%s\n", (status & OMAP_MMU_IRQ_TRANSLATIONFAULT)?
292                 "translation fault":"");
293         pr_info("%s\n", (status & OMAP_MMU_IRQ_TLBMISS)?
294                 "TLB miss":"");
295         pr_info("fault address = %#08lx\n", va);
296
297         omap_mmu_disable(mmu);
298         omap_mmu_write_reg(mmu, status, OMAP_MMU_IRQSTATUS);
299
300         mmu->fault_address = va;
301         schedule_work(&mmu->irq_work);
302 }
303
/*
 * Pack a TLB entry's page attributes into the PTE attribute field:
 * mixed flag at bit 5, endianness bit, and elsz >> 3 — presumably
 * converting an element size expressed in bits to the hardware
 * encoding (TODO confirm against the MMU register layout).  For page
 * sizes other than 4KB the whole field is shifted left by 6.
 *
 * NOTE(review): the value is computed in a u32 and implicitly
 * converted to pgprot_t on return — this relies on pgprot_t being a
 * plain integer type in this configuration; confirm.
 */
static pgprot_t omap2_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
{
        u32 attr;

        attr = entry->mixed << 5;
        attr |= entry->endian;
        attr |= entry->elsz >> 3;
        attr <<= ((entry->pgsz & OMAP_MMU_CAM_PAGESIZE_4KB) ? 0:6);

        return attr;
}
315
/* OMAP2-specific operations plugged into the generic omap-mmu layer. */
struct omap_mmu_ops omap2_mmu_ops = {
        .startup        = omap2_mmu_startup,
        .shutdown       = omap2_mmu_shutdown,
        .read_tlb       = omap2_mmu_read_tlb,
        .load_tlb       = omap2_mmu_load_tlb,
        .show           = omap2_mmu_show,
        .cam_va         = omap2_mmu_cam_va,
        .cam_ram_alloc  = omap2_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap2_mmu_cam_ram_valid,
        .interrupt      = omap2_mmu_interrupt,
        .pte_get_attr   = omap2_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap2_mmu_ops);
329
330 MODULE_LICENSE("GPL");