/*
 * linux/arch/arm/mach-omap1/mmu.c
 *
 * Support for non-MPU OMAP1 MMUs.
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <asm/tlbflush.h>
#include "mmu.h"

static void *dspvect_page;
#define DSP_INIT_PAGE   0xfff000

#define MMUFAULT_MASK (OMAP_MMU_FAULT_ST_PERM |\
                       OMAP_MMU_FAULT_ST_TLB_MISS |\
                       OMAP_MMU_FAULT_ST_TRANS)
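
/*
 * Added note: the three fault types in MMUFAULT_MASK (permission fault,
 * TLB miss, translation fault) are handed to the deferred handler by
 * omap1_mmu_interrupt(); anything else, notably a prefetch error, is
 * only logged and otherwise ignored.
 */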

static unsigned int get_cam_l_va_mask(u16 pgsz)
{
        switch (pgsz) {
        case OMAP_MMU_CAM_PAGESIZE_1MB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
        case OMAP_MMU_CAM_PAGESIZE_64KB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
        case OMAP_MMU_CAM_PAGESIZE_4KB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
        case OMAP_MMU_CAM_PAGESIZE_1KB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
        }
        return 0;
}

#define get_cam_va_mask(pgsz) \
        ((u32)OMAP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
         (u32)get_cam_l_va_mask(pgsz) << 6)
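
/*
 * Added note: a TLB entry's virtual-address tag is split across two
 * registers: CAM_H carries VA bits [31:22] and CAM_L carries VA bits
 * [21:6], with the low tag masked down according to the page size
 * (a 1MB entry only needs bits [21:20], a 1KB entry needs all of
 * [21:10], and so on). get_cam_va_mask() reassembles the full 32-bit
 * mask from both halves.
 */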

static int intmem_usecount;

/* Safety net: force the internal-memory refcount back to a sane state. */
void dsp_mem_usecount_clear(void)
{
        if (intmem_usecount != 0) {
                printk(KERN_WARNING
                       "MMU: unbalanced memory request/release detected.\n"
                       "         intmem_usecount should be zero here but "
                       "is not; forcing it to zero.\n");
                intmem_usecount = 0;
                omap_dsp_release_mem();
        }
}
EXPORT_SYMBOL_GPL(dsp_mem_usecount_clear);

static int omap1_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
        int ret = 0;

        if (omap_mmu_internal_memory(mmu, addr)) {
                if (intmem_usecount++ == 0)
                        ret = omap_dsp_request_mem();
        }

        return ret;
}

static int omap1_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
        int ret = 0;

        if (omap_mmu_internal_memory(mmu, addr)) {
                if (--intmem_usecount == 0)
                        omap_dsp_release_mem();
        } else
                ret = -EIO;

        return ret;
}
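
/*
 * Added note: mem_enable/mem_disable form a refcounted pair around the
 * DSP internal memory; the first enable requests it via
 * omap_dsp_request_mem() and the last disable releases it again.
 */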

static inline void
omap1_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* read a TLB entry */
        omap_mmu_write_reg(mmu, OMAP_MMU_LD_TLB_RD, OMAP_MMU_LD_TLB);

        cr->cam_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_H);
        cr->cam_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_L);
        cr->ram_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_H);
        cr->ram_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_L);
}

static inline void
omap1_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam_h, OMAP_MMU_CAM_H);
        omap_mmu_write_reg(mmu, cr->cam_l, OMAP_MMU_CAM_L);
        omap_mmu_write_reg(mmu, cr->ram_h, OMAP_MMU_RAM_H);
        omap_mmu_write_reg(mmu, cr->ram_l, OMAP_MMU_RAM_L);
}
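
/*
 * Added note: these two callbacks only latch the CAM/RAM register pair.
 * Selecting the victim entry (via the TLB lock) and triggering the
 * actual load is presumably handled by the generic omap-mmu layer,
 * which wraps them as omap_mmu_read_tlb()/omap_mmu_load_tlb(), as seen
 * in omap1_mmu_show() below.
 */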

static ssize_t omap1_mmu_show(struct omap_mmu *mmu, char *buf,
                              struct omap_mmu_tlb_lock *tlb_lock)
{
        int i, len;

        len = sprintf(buf, "P: preserved, V: valid\n"
                           "ety P V size   cam_va     ram_pa ap\n");
                         /* 00: P V  4KB 0x300000 0x10171800 FA */

        for (i = 0; i < mmu->nr_tlb_entries; i++) {
                struct omap_mmu_tlb_entry ent;
                struct cam_ram_regset cr;
                struct omap_mmu_tlb_lock entry_lock;
                char *pgsz_str, *ap_str;

                /* read a TLB entry */
                entry_lock.base   = tlb_lock->base;
                entry_lock.victim = i;
                omap_mmu_read_tlb(mmu, &entry_lock, &cr);

                ent.pgsz  = cr.cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
                ent.prsvd = cr.cam_l & OMAP_MMU_CAM_P;
                ent.valid = cr.cam_l & OMAP_MMU_CAM_V;
                ent.ap    = cr.ram_l & OMAP_MMU_RAM_L_AP_MASK;
                ent.va = (u32)(cr.cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
                ent.pa = (unsigned long)cr.ram_h << 16 |
                         (cr.ram_l & OMAP_MMU_RAM_L_RAM_LSB_MASK);

                pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB" :
                                                                      " ???";
                ap_str = (ent.ap == OMAP_MMU_RAM_L_AP_RO) ? "RO" :
                         (ent.ap == OMAP_MMU_RAM_L_AP_FA) ? "FA" :
                         (ent.ap == OMAP_MMU_RAM_L_AP_NA) ? "NA" :
                                                            "??";

                if (i == tlb_lock->base)
                        len += sprintf(buf + len, "lock base = %d\n",
                                       tlb_lock->base);
                if (i == tlb_lock->victim)
                        len += sprintf(buf + len, "victim    = %d\n",
                                       tlb_lock->victim);
                len += sprintf(buf + len,
                               /* 00: P V  4KB 0x300000 0x10171800 FA */
                               "%02d: %c %c %s 0x%06lx 0x%08lx %s\n",
                               i,
                               ent.prsvd ? 'P' : ' ',
                               ent.valid ? 'V' : ' ',
                               pgsz_str, ent.va, ent.pa, ap_str);
        }

        return len;
}

static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
{
        int n = 0;

        exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);

        return n;
}

static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
{
        exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
}

static int omap1_mmu_startup(struct omap_mmu *mmu)
{
        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                dev_err(&mmu->dev, "MMU %s: failed to allocate memory "
                        "for vector table\n", mmu->name);
                return -ENOMEM;
        }

        mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);

        return 0;
}

static void omap1_mmu_shutdown(struct omap_mmu *mmu)
{
        exmap_clear_preserved_entries(mmu);

        if (dspvect_page != NULL) {
                unsigned long virt;

                down_read(&mmu->exmap_sem);

                virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&mmu->exmap_sem);
        }
}

static inline unsigned long omap1_mmu_cam_va(struct cam_ram_regset *cr)
{
        unsigned int page_size = cr->cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;

        return (u32)(cr->cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK)  << 22 |
               (u32)(cr->cam_l & get_cam_l_va_mask(page_size)) << 6;
}

static struct cam_ram_regset *
omap1_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
{
        struct cam_ram_regset *cr;

        if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
                dev_err(&mmu->dev, "MMU %s: mapping vadr (0x%06lx) is not on"
                        " an aligned boundary\n", mmu->name, entry->va);
                return ERR_PTR(-EINVAL);
        }

        cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
        if (cr == NULL)
                return ERR_PTR(-ENOMEM);

        cr->cam_h = entry->va >> 22;
        cr->cam_l = (entry->va >> 6 & get_cam_l_va_mask(entry->pgsz)) |
                   entry->prsvd | entry->pgsz;
        cr->ram_h = entry->pa >> 16;
        cr->ram_l = (entry->pa & OMAP_MMU_RAM_L_RAM_LSB_MASK) | entry->ap;

        return cr;
}
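
/*
 * Worked example (illustrative, reusing the sample row shown in
 * omap1_mmu_show() above): a 4KB entry mapping va 0x300000 to
 * pa 0x10171800 encodes as cam_h = 0x300000 >> 22 = 0x0,
 * cam_l = ((0x300000 >> 6) & low-tag mask) | prsvd | pgsz
 *       = 0xc000 plus the flag bits,
 * ram_h = 0x10171800 >> 16 = 0x1017, and ram_l = 0x1800 | AP bits.
 */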

static inline int omap1_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
        return cr->cam_l & OMAP_MMU_CAM_V;
}

static void omap1_mmu_interrupt(struct omap_mmu *mmu)
{
        unsigned long status;
        unsigned long adh, adl;
        unsigned long dp;
        unsigned long va;

        status = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_ST);
        adh = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_H);
        adl = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_L);
        dp = adh & OMAP_MMU_FAULT_AD_H_DP;
        va = (((adh & OMAP_MMU_FAULT_AD_H_ADR_MASK) << 16) | adl);

        /* if the fault is masked, nothing to do */
        if ((status & MMUFAULT_MASK) == 0) {
                pr_debug("MMU interrupt, but ignoring.\n");
                /*
                 * note: in OMAP1710,
                 * when CACHE + DMA domain gets out of idle in DSP,
                 * MMU interrupt occurs but MMU_FAULT_ST is not set.
                 * in this case, we just ignore the interrupt.
                 */
                if (status) {
                        pr_debug("%s%s%s%s\n",
                                 (status & OMAP_MMU_FAULT_ST_PREF) ?
                                 "  (prefetch err)" : "",
                                 (status & OMAP_MMU_FAULT_ST_PERM) ?
                                 "  (permission fault)" : "",
                                 (status & OMAP_MMU_FAULT_ST_TLB_MISS) ?
                                 "  (TLB miss)" : "",
                                 (status & OMAP_MMU_FAULT_ST_TRANS) ?
                                 "  (translation fault)" : "");
                        pr_debug("fault address = %#08lx\n", va);
                }
                enable_irq(mmu->irq);
                return;
        }

        pr_info("%s%s%s%s\n",
                (status & OMAP_MMU_FAULT_ST_PREF) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PREF) ?
                "  prefetch err" :
                "  (prefetch err)" :
                "",
                (status & OMAP_MMU_FAULT_ST_PERM) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PERM) ?
                "  permission fault" :
                "  (permission fault)" :
                "",
                (status & OMAP_MMU_FAULT_ST_TLB_MISS) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TLB_MISS) ?
                "  TLB miss" :
                "  (TLB miss)" :
                "",
                (status & OMAP_MMU_FAULT_ST_TRANS) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TRANS) ?
                "  translation fault" :
                "  (translation fault)" :
                "");
        pr_info("fault address = %#08lx\n", va);

        mmu->fault_address = va;
        schedule_work(&mmu->irq_work);
}
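
/*
 * Added note: for real (unmasked) faults the IRQ is deliberately left
 * disabled here; re-enabling it is presumably up to the deferred
 * irq_work handler once the fault has been serviced, whereas ignored
 * interrupts re-enable it immediately above.
 */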

static pgprot_t omap1_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
{
        /* 4KB AP position as default */
        u32 attr = entry->ap >> 4;
        attr <<= ((entry->pgsz == OMAP_MMU_CAM_PAGESIZE_1MB) ? 6 : 0);
        return attr;
}
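
/*
 * Added note: the shift by 6 for 1MB entries matches the ARMv5 page
 * table layout, where the access-permission bits sit at [5:4] (AP0) in
 * a small-page second-level descriptor but at [11:10] in a section
 * first-level descriptor.
 */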

struct omap_mmu_ops omap1_mmu_ops = {
        .startup        = omap1_mmu_startup,
        .shutdown       = omap1_mmu_shutdown,
        .mem_enable     = omap1_mmu_mem_enable,
        .mem_disable    = omap1_mmu_mem_disable,
        .read_tlb       = omap1_mmu_read_tlb,
        .load_tlb       = omap1_mmu_load_tlb,
        .show           = omap1_mmu_show,
        .cam_va         = omap1_mmu_cam_va,
        .cam_ram_alloc  = omap1_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap1_mmu_cam_ram_valid,
        .interrupt      = omap1_mmu_interrupt,
        .pte_get_attr   = omap1_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap1_mmu_ops);
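
/*
 * Minimal usage sketch (hypothetical, not part of this file): a platform
 * driver would point its struct omap_mmu at this exported ops table
 * before registering with the generic OMAP MMU layer, e.g.:
 *
 *	static struct omap_mmu dsp_mmu = {
 *		.name		= "dsp",
 *		.nr_tlb_entries	= 32,
 *		.ops		= &omap1_mmu_ops,
 *	};
 *
 * Only .name, .nr_tlb_entries and .ops correspond to fields this file
 * actually uses (along with .dev, .irq and .exmap_sem); the initializer
 * above is an assumption about how registration looks elsewhere.
 */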