/*
 * linux/arch/arm/mach-omap2/mmu.c
 *
 * Support for non-MPU OMAP2 MMUs.
 *
 * Copyright (C) 2002-2007 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include "mmu.h"
#include <asm/arch/mmu.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/sizes.h>

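/*
 * Backing page for the DSP vector table.  It is allocated at startup
 * and mapped as a preserved entry at DSP address DSP_INIT_PAGE by
 * exmap_setup_preserved_entries().
 */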
static void *dspvect_page;
#define DSP_INIT_PAGE   0xfff000

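/*
 * Accessors for a single TLB entry, expressed as its CAM/RAM register
 * pair.  The caller is expected to have selected the entry via the TLB
 * lock registers; omap2_mmu_load_tlb() sets the valid bit itself when
 * writing the CAM register.
 */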
static inline void
omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        cr->cam = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM);
        cr->ram = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM);
}

static inline void
omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, OMAP_MMU_CAM);
        omap_mmu_write_reg(mmu, cr->ram, OMAP_MMU_RAM);
}

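/*
 * Map one 4KB I/O page into the DSP's address space as a preserved
 * entry.  The DSP byte address is built from IOMAP_VAL and dsp_io_adr
 * (shifted up by one, i.e. treated as a word address); the page is also
 * aliased into the ARM kernel mapping so omap_mmu_to_virt() works on it.
 */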
static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
                                   unsigned long dsp_io_adr, int index)
{
        unsigned long dspadr;
        void *virt;
        struct omap_mmu_tlb_entry tlb_ent;

        dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
        INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
        omap_mmu_load_pte_entry(mmu, &tlb_ent);
}

static void exmap_clear_iomap_page(struct omap_mmu *mmu,
                                   unsigned long dsp_io_adr)
{
        unsigned long dspadr;
        void *virt;

        dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
        virt = omap_mmu_to_virt(mmu, dspadr);
        exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
        /* The DSP MMU is shutting down; the DSP-side entry is not cleared here. */
}

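/* Physical bases of the L4 peripherals given preserved DSP mappings below. */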
#define OMAP24XX_MAILBOX_BASE   (L4_24XX_BASE + 0x94000)
#define OMAP2420_GPT5_BASE      (L4_24XX_BASE + 0x7c000)
#define OMAP2420_GPT6_BASE      (L4_24XX_BASE + 0x7e000)
#define OMAP2420_GPT7_BASE      (L4_24XX_BASE + 0x80000)
#define OMAP2420_GPT8_BASE      (L4_24XX_BASE + 0x82000)
#define OMAP24XX_EAC_BASE       (L4_24XX_BASE + 0x90000)
#define OMAP24XX_STI_BASE       (L4_24XX_BASE + 0x68000)
#define OMAP24XX_STI_CH_BASE    (L4_24XX_BASE + 0x0c000000)

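/*
 * Install the preserved (locked) mappings the DSP needs from the start:
 * the vector page, PRCM and mailbox registers, plus GPT5-8, EAC and STI
 * on 2420.  Returns the number of entries installed.
 */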
static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
{
        int i, n = 0;

        exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);

        /* REVISIT: This will need to be revisited for 3430 */
        exmap_setup_iomap_page(mmu, OMAP2_PRCM_BASE, 0x7000, n++);
        exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);

        if (cpu_is_omap2420()) {
                exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
                exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
                exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
                exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
                exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE,  0x10000, n++);
                exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
                for (i = 0; i < 5; i++)
                        exmap_setup_preserved_mem_page(mmu,
                                __va(OMAP24XX_STI_CH_BASE + i*SZ_4K),
                                0xfb0000 + i*SZ_4K, n++);
        }

        return n;
}

static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
{
        int i;

        exmap_clear_iomap_page(mmu, 0x7000);    /* PRCM registers */
        exmap_clear_iomap_page(mmu, 0x11000);   /* MAILBOX registers */

        if (cpu_is_omap2420()) {
                exmap_clear_iomap_page(mmu, 0xe000);    /* GPT5 */
                exmap_clear_iomap_page(mmu, 0xe800);    /* GPT6 */
                exmap_clear_iomap_page(mmu, 0xf000);    /* GPT7 */
                exmap_clear_iomap_page(mmu, 0xf800);    /* GPT8 */
                exmap_clear_iomap_page(mmu, 0x10000);   /* EAC */
                exmap_clear_iomap_page(mmu, 0xc800);    /* STI */
                for (i = 0; i < 5; i++)                 /* STI CH */
                        exmap_clear_mem_page(mmu, 0xfb0000 + i*SZ_4K);
        }

        exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
}

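/* Fault interrupts unmasked at startup; TLB misses are left masked. */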
#define MMU_IRQ_MASK \
        (OMAP_MMU_IRQ_MULTIHITFAULT | \
         OMAP_MMU_IRQ_TABLEWALKFAULT | \
         OMAP_MMU_IRQ_EMUMISS | \
         OMAP_MMU_IRQ_TRANSLATIONFAULT)

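/*
 * Report the hardware revision, allocate the DSP vector table page,
 * install the preserved mappings and unmask the fault interrupts.
 */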
static int omap2_mmu_startup(struct omap_mmu *mmu)
{
        u32 rev = omap_mmu_read_reg(mmu, OMAP_MMU_REVISION);

        pr_info("MMU: OMAP %s MMU initialized (HW v%d.%d)\n", mmu->name,
                (rev >> 4) & 0xf, rev & 0xf);

        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                printk(KERN_ERR "MMU: failed to allocate memory "
                                "for dsp vector table\n");
                return -ENOMEM;
        }

        mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);

        omap_mmu_write_reg(mmu, MMU_IRQ_MASK, OMAP_MMU_IRQENABLE);

        return 0;
}

static void omap2_mmu_shutdown(struct omap_mmu *mmu)
{
        exmap_clear_preserved_entries(mmu);

        if (dspvect_page != NULL) {
                unsigned long virt;

                down_read(&mmu->exmap_sem);

                virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&mmu->exmap_sem);
        }
}

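/*
 * Format every TLB entry, decoded from its CAM/RAM register pair, into
 * buf, flagging the current lock base and victim entries on the way.
 */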
static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
                              struct omap_mmu_tlb_lock *tlb_lock)
{
        int i, len;

        len = sprintf(buf, "P: preserved, V: valid\n"
                           "B: big endian, L: little endian, "
                           "M: mixed page attribute\n"
                           "ety P V size   cam_va     ram_pa E ES M\n");
                         /* 00: P V  4KB 0x300000 0x10171800 B 16 M */

        for (i = 0; i < mmu->nr_tlb_entries; i++) {
                struct omap_mmu_tlb_entry ent;
                struct cam_ram_regset cr;
                struct omap_mmu_tlb_lock entry_lock;
                char *pgsz_str, *elsz_str;

                /* read a TLB entry */
                entry_lock.base   = tlb_lock->base;
                entry_lock.victim = i;
                omap_mmu_read_tlb(mmu, &entry_lock, &cr);

                ent.pgsz   = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
                ent.prsvd  = cr.cam & OMAP_MMU_CAM_P;
                ent.valid  = cr.cam & OMAP_MMU_CAM_V;
                ent.va     = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
                ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
                ent.elsz   = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
                ent.pa     = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
                ent.mixed  = cr.ram & OMAP_MMU_RAM_MIXED;

                pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
                                                                     " ???";
                elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
                           (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16":
                           (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32":
                                                                      "??";

                if (i == tlb_lock->base)
                        len += sprintf(buf + len, "lock base = %d\n",
                                       tlb_lock->base);
                if (i == tlb_lock->victim)
                        len += sprintf(buf + len, "victim    = %d\n",
                                       tlb_lock->victim);

                len += sprintf(buf + len,
                               /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
                               "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
                               i,
                               ent.prsvd ? 'P' : ' ',
                               ent.valid ? 'V' : ' ',
                               pgsz_str, ent.va, ent.pa,
                               ent.endian ? 'B' : 'L',
                               elsz_str,
                               ent.mixed ? 'M' : ' ');
        }

        return len;
}

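/* Virtual-address mask implied by the CAM page-size field (0 if unknown). */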
#define get_cam_va_mask(pgsz) \
        (((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)

static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
{
        unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
        unsigned int mask = get_cam_va_mask(page_size);

        return cr->cam & mask;
}

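/*
 * Allocate and encode the CAM/RAM register pair for a new TLB entry,
 * rejecting virtual addresses that are not aligned to the page size.
 */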
static struct cam_ram_regset *
omap2_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
{
        struct cam_ram_regset *cr;

        if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
                printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
                       "aligned boundary\n", entry->va);
                return ERR_PTR(-EINVAL);
        }

        cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
                  entry->prsvd | entry->pgsz;
        cr->ram = entry->pa | entry->endian | entry->elsz;

        return cr;
}

static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
        return cr->cam & OMAP_MMU_CAM_V;
}

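/*
 * Fault handler: report the cause and faulting address, disable the
 * MMU, ack the interrupt status and defer recovery to mmu->irq_work.
 */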
static void omap2_mmu_interrupt(struct omap_mmu *mmu)
{
        unsigned long status, va;

        status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, OMAP_MMU_IRQSTATUS);
        va = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD);

        pr_info("%s\n", (status & OMAP_MMU_IRQ_MULTIHITFAULT)           ? "multi hit":"");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_TABLEWALKFAULT)          ? "table walk fault":"");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_EMUMISS)                 ? "EMU miss":"");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_TRANSLATIONFAULT)        ? "translation fault":"");
        pr_info("%s\n", (status & OMAP_MMU_IRQ_TLBMISS)                 ? "TLB miss":"");
        pr_info("fault address = %#08lx\n", va);

        omap_mmu_disable(mmu);
        omap_mmu_write_reg(mmu, status, OMAP_MMU_IRQSTATUS);

        mmu->fault_address = va;
        schedule_work(&mmu->irq_work);
}

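/*
 * Pack the mixed/endianness/element-size attributes into the attribute
 * word consumed by the shared OMAP MMU PTE code; for entries without
 * the 4KB page-size bits set, the attributes are shifted up by six bits.
 */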
static pgprot_t omap2_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
{
        u32 attr;

        attr = entry->mixed << 5;
        attr |= entry->endian;
        attr |= entry->elsz >> 3;
        attr <<= ((entry->pgsz & OMAP_MMU_CAM_PAGESIZE_4KB) ? 0:6);

        return attr;
}

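/* OMAP2-specific operations consumed by the generic OMAP MMU framework. */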
struct omap_mmu_ops omap2_mmu_ops = {
        .startup        = omap2_mmu_startup,
        .shutdown       = omap2_mmu_shutdown,
        .read_tlb       = omap2_mmu_read_tlb,
        .load_tlb       = omap2_mmu_load_tlb,
        .show           = omap2_mmu_show,
        .cam_va         = omap2_mmu_cam_va,
        .cam_ram_alloc  = omap2_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap2_mmu_cam_ram_valid,
        .interrupt      = omap2_mmu_interrupt,
        .pte_get_attr   = omap2_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap2_mmu_ops);

MODULE_LICENSE("GPL");