]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - arch/arm/mach-omap2/mmu.c
ARM:OMAP: Add initial TWL support for non-MPU MMU framework
[linux-2.6-omap-h63xx.git] / arch / arm / mach-omap2 / mmu.c
1 /*
2  * linux/arch/arm/mach-omap2/mmu.c
3  *
4  * Support for non-MPU OMAP2 MMUs.
5  *
6  * Copyright (C) 2002-2007 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *        and Paul Mundt <paul.mundt@nokia.com>
10  *
11  * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26  */
27 #include <linux/types.h>
28 #include <linux/init.h>
29 #include <linux/rwsem.h>
30 #include <linux/device.h>
31 #include <linux/mm.h>
32 #include <linux/interrupt.h>
33 #include "mmu.h"
34 #include <asm/arch/mmu.h>
35 #include <asm/tlbflush.h>
36 #include <asm/io.h>
37 #include <asm/sizes.h>
38
/* Page backing the DSP vector table; allocated in omap2_mmu_startup()
 * and released in omap2_mmu_shutdown(). */
static void *dspvect_page;
/* DSP-side address at which the vector page is preserved-mapped. */
#define DSP_INIT_PAGE   0xfff000
41
/*
 * Read back the currently selected TLB entry into @cr.
 * The CAM (tag) and RAM (data) halves are fetched from the MMU's
 * read-back registers; the victim entry must have been selected by the
 * caller beforehand.
 */
static inline void
omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        cr->cam = omap_mmu_read_reg(mmu, MMU_READ_CAM);
        cr->ram = omap_mmu_read_reg(mmu, MMU_READ_RAM);
}
48
/*
 * Program the selected TLB entry from @cr.
 * The valid bit is forced on in the CAM word so the entry becomes
 * active as soon as it is written.
 */
static inline void
omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, MMU_CAM);
        omap_mmu_write_reg(mmu, cr->ram, MMU_RAM);
}
56
57 static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
58                                    unsigned long dsp_io_adr, int index)
59 {
60         unsigned long dspadr;
61         void *virt;
62         struct omap_mmu_tlb_entry tlb_ent;
63
64         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
65         virt = omap_mmu_to_virt(mmu, dspadr);
66         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
67         INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
68         INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
69         omap_mmu_load_pte_entry(mmu, &tlb_ent);
70 }
71
72 static void exmap_clear_iomap_page(struct omap_mmu *mmu,
73                                    unsigned long dsp_io_adr)
74 {
75         unsigned long dspadr;
76         void *virt;
77
78         dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
79         virt = omap_mmu_to_virt(mmu, dspadr);
80         exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
81         /* DSP MMU is shutting down. not handled here. */
82 }
83
/* Physical base addresses (offsets into the L4 interconnect) of the
 * peripherals that get preserved DSP mappings below. */
#define OMAP24XX_MAILBOX_BASE   (L4_24XX_BASE + 0x94000)
#define OMAP2420_GPT5_BASE      (L4_24XX_BASE + 0x7c000)
#define OMAP2420_GPT6_BASE      (L4_24XX_BASE + 0x7e000)
#define OMAP2420_GPT7_BASE      (L4_24XX_BASE + 0x80000)
#define OMAP2420_GPT8_BASE      (L4_24XX_BASE + 0x82000)
#define OMAP24XX_EAC_BASE       (L4_24XX_BASE + 0x90000)
#define OMAP24XX_STI_BASE       (L4_24XX_BASE + 0x68000)
#define OMAP24XX_STI_CH_BASE    (L4_24XX_BASE + 0x0c000000)
92
93 static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
94 {
95         int i, n = 0;
96
97         exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
98
99         exmap_setup_iomap_page(mmu, OMAP24XX_PRCM_BASE, 0x7000, n++);
100         exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);
101
102         if (cpu_is_omap2420()) {
103                 exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
104                 exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
105                 exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
106                 exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
107                 exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE,  0x10000, n++);
108                 exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
109                 for (i = 0; i < 5; i++)
110                         exmap_setup_preserved_mem_page(mmu,
111                                 __va(OMAP24XX_STI_CH_BASE + i*SZ_4K),
112                                 0xfb0000 + i*SZ_4K, n++);
113         }
114
115         return n;
116 }
117
118 static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
119 {
120         int i;
121
122         exmap_clear_iomap_page(mmu, 0x7000);    /* PRCM registers */
123         exmap_clear_iomap_page(mmu, 0x11000);   /* MAILBOX registers */
124
125         if (cpu_is_omap2420()) {
126                 exmap_clear_iomap_page(mmu, 0xe000);    /* GPT5 */
127                 exmap_clear_iomap_page(mmu, 0xe800);    /* GPT6 */
128                 exmap_clear_iomap_page(mmu, 0xf000);    /* GPT7 */
129                 exmap_clear_iomap_page(mmu, 0xf800);    /* GPT8 */
130                 exmap_clear_iomap_page(mmu, 0x10000);   /* EAC */
131                 exmap_clear_iomap_page(mmu, 0xc800);    /* STI */
132                 for (i = 0; i < 5; i++)                 /* STI CH */
133                         exmap_clear_mem_page(mmu, 0xfb0000 + i*SZ_4K);
134         }
135
136         exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
137 }
138
/* Fault conditions we enable and report; note TLB miss is deliberately
 * not part of this mask. */
#define MMU_IRQ_MASK \
        (OMAP_MMU_IRQ_MULTIHITFAULT | \
         OMAP_MMU_IRQ_TABLEWALKFAULT | \
         OMAP_MMU_IRQ_EMUMISS | \
         OMAP_MMU_IRQ_TRANSLATIONFAULT)
144
145 static int omap2_mmu_startup(struct omap_mmu *mmu)
146 {
147         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
148         if (dspvect_page == NULL) {
149                 printk(KERN_ERR "MMU: failed to allocate memory "
150                                 "for dsp vector table\n");
151                 return -ENOMEM;
152         }
153
154         mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
155
156         omap_mmu_write_reg(mmu, MMU_IRQ_MASK, MMU_IRQENABLE);
157
158         return 0;
159 }
160
161 static void omap2_mmu_shutdown(struct omap_mmu *mmu)
162 {
163         exmap_clear_preserved_entries(mmu);
164
165         if (dspvect_page != NULL) {
166                 unsigned long virt;
167
168                 down_read(&mmu->exmap_sem);
169
170                 virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
171                 flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
172                 free_page((unsigned long)dspvect_page);
173                 dspvect_page = NULL;
174
175                 up_read(&mmu->exmap_sem);
176         }
177 }
178
/*
 * Format the full TLB contents into @buf for a sysfs "show".
 * Every entry is read back and decoded into page size, preserved/valid
 * flags, virtual tag, physical address, endianness, element size and
 * the mixed-attribute bit.  Returns the number of bytes written.
 * NOTE(review): output length is not bounded against the buffer size —
 * presumably @buf is a PAGE_SIZE sysfs buffer large enough for
 * nr_tlb_entries lines; confirm against the caller.
 */
static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
                              struct omap_mmu_tlb_lock *tlb_lock)
{
        int i, len;

        len = sprintf(buf, "P: preserved, V: valid\n"
                           "B: big endian, L:little endian, "
                           "M: mixed page attribute\n"
                           "ety P V size   cam_va     ram_pa E ES M\n");
                         /* 00: P V  4KB 0x300000 0x10171800 B 16 M */

        for (i = 0; i < mmu->nr_tlb_entries; i++) {
                struct omap_mmu_tlb_entry ent;
                struct cam_ram_regset cr;
                struct omap_mmu_tlb_lock entry_lock;
                char *pgsz_str, *elsz_str;

                /* read a TLB entry */
                entry_lock.base   = tlb_lock->base;
                entry_lock.victim = i;
                omap_mmu_read_tlb(mmu, &entry_lock, &cr);

                /* Decode the raw CAM/RAM words into the entry fields. */
                ent.pgsz   = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
                ent.prsvd  = cr.cam & OMAP_MMU_CAM_P;
                ent.valid  = cr.cam & OMAP_MMU_CAM_V;
                ent.va     = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
                ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
                ent.elsz   = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
                ent.pa     = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
                ent.mixed  = cr.ram & OMAP_MMU_RAM_MIXED;

                /* NOTE(review): the 16MB page size prints as "64MB" —
                 * looks like a pre-existing label typo; confirm before
                 * changing user-visible output. */
                pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "64MB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
                                                                     " ???";
                elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
                           (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16":
                           (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32":
                                                                      "??";

                /* Mark where the lock base and victim pointers sit. */
                if (i == tlb_lock->base)
                        len += sprintf(buf + len, "lock base = %d\n",
                                       tlb_lock->base);
                if (i == tlb_lock->victim)
                        len += sprintf(buf + len, "victim    = %d\n",
                                       tlb_lock->victim);

                len += sprintf(buf + len,
                               /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
                               "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
                               i,
                               ent.prsvd ? 'P' : ' ',
                               ent.valid ? 'V' : ' ',
                               pgsz_str, ent.va, ent.pa,
                               ent.endian ? 'B' : 'L',
                               elsz_str,
                               ent.mixed ? 'M' : ' ');
        }

        return len;
}
241
/* Virtual-address alignment mask for a given CAM page-size field;
 * evaluates to 0 for an unrecognized page size. */
#define get_cam_va_mask(pgsz) \
        (((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
         ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)
247
248 static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
249 {
250         unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
251         unsigned int mask = get_cam_va_mask(cr->cam & page_size);
252
253         return cr->cam & mask;
254 }
255
256 static struct cam_ram_regset *
257 omap2_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
258 {
259         struct cam_ram_regset *cr;
260
261         if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
262                 printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
263                        "aligned boundary\n", entry->va);
264                 return ERR_PTR(-EINVAL);
265         }
266
267         cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
268
269         cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
270                   entry->prsvd | entry->pgsz;
271         cr->ram = entry->pa | entry->endian | entry->elsz;
272
273         return cr;
274 }
275
/* Nonzero iff the entry's valid bit is set in the CAM word. */
static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
        return cr->cam & OMAP_MMU_CAM_V;
}
280
281 static void omap2_mmu_interrupt(struct omap_mmu *mmu)
282 {
283         unsigned long status, va;
284
285         status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, MMU_IRQSTATUS);
286         va = omap_mmu_read_reg(mmu, MMU_FAULT_AD);
287
288         pr_info("%s\n", (status & OMAP_MMU_IRQ_MULTIHITFAULT)           ? "multi hit":"");
289         pr_info("%s\n", (status & OMAP_MMU_IRQ_TABLEWALKFAULT)          ? "table walk fault":"");
290         pr_info("%s\n", (status & OMAP_MMU_IRQ_EMUMISS)                 ? "EMU miss":"");
291         pr_info("%s\n", (status & OMAP_MMU_IRQ_TRANSLATIONFAULT)        ? "translation fault":"");
292         pr_info("%s\n", (status & OMAP_MMU_IRQ_TLBMISS)                 ? "TLB miss":"");
293         pr_info("fault address = %#08lx\n", va);
294
295         omap_mmu_disable(mmu);
296         omap_mmu_write_reg(mmu, status, MMU_IRQSTATUS);
297
298         mmu->fault_address = va;
299         schedule_work(&mmu->irq_work);
300 }
301
/*
 * Build the PTE attribute bits for @entry from its mixed-region flag,
 * endianness and element size.
 * NOTE(review): the result is built as a u32 and returned as pgprot_t;
 * this relies on pgprot_t being a plain integer type here — confirm.
 */
static pgprot_t omap2_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
{
        u32 attr;

        attr = entry->mixed << 5;
        attr |= entry->endian;
        attr |= entry->elsz >> 3;
        /* For non-4KB pages the attribute field sits 6 bits higher —
         * presumably matching the section-descriptor layout; verify
         * against the OMAP2 MMU TRM. */
        attr <<= ((entry->pgsz & OMAP_MMU_CAM_PAGESIZE_4KB) ? 0:6);

        return attr;
}
313
/* OMAP2 implementation of the generic omap_mmu operations; exported
 * for the DSP/IVA drivers that instantiate a non-MPU MMU. */
struct omap_mmu_ops omap2_mmu_ops = {
        .startup        = omap2_mmu_startup,
        .shutdown       = omap2_mmu_shutdown,
        .read_tlb       = omap2_mmu_read_tlb,
        .load_tlb       = omap2_mmu_load_tlb,
        .show           = omap2_mmu_show,
        .cam_va         = omap2_mmu_cam_va,
        .cam_ram_alloc  = omap2_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap2_mmu_cam_ram_valid,
        .interrupt      = omap2_mmu_interrupt,
        .pte_get_attr   = omap2_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap2_mmu_ops);
327
328 MODULE_LICENSE("GPL");