/*
 * linux/arch/arm/mach-omap1/mmu.c
 *
 * Support for non-MPU OMAP1 MMUs.
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *        and Paul Mundt <paul.mundt@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include "mmu.h"
#include <asm/tlbflush.h>

static void *dspvect_page;
#define DSP_INIT_PAGE   0xfff000

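/*
 * Fault status bits treated as real faults.  Prefetch errors
 * (OMAP_MMU_FAULT_ST_PREF) are deliberately left out: they are only
 * logged, not serviced, in omap1_mmu_interrupt() below.
 */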
#define MMUFAULT_MASK (OMAP_MMU_FAULT_ST_PERM |\
                       OMAP_MMU_FAULT_ST_TLB_MISS |\
                       OMAP_MMU_FAULT_ST_TRANS)

static unsigned int get_cam_l_va_mask(u16 pgsz)
{
        switch (pgsz) {
        case OMAP_MMU_CAM_PAGESIZE_1MB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
        case OMAP_MMU_CAM_PAGESIZE_64KB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
        case OMAP_MMU_CAM_PAGESIZE_4KB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
        case OMAP_MMU_CAM_PAGESIZE_1KB:
                return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
                       OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
        }
        return 0;
}

#define get_cam_va_mask(pgsz) \
        ((u32)OMAP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
         (u32)get_cam_l_va_mask(pgsz) << 6)
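 
/*
 * The DSP virtual address tag is split across two 16-bit CAM
 * registers: CAM_H carries VA bits [31:22], CAM_L carries the rest of
 * the tag, and how far down the tag extends depends on the page size
 * (see get_cam_l_va_mask() above).  The full VA is reconstructed as
 * (cam_h << 22) | (masked cam_l << 6), as done in omap1_mmu_cam_va().
 */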

static int intmem_usecount;

/* for safety */
void dsp_mem_usecount_clear(void)
{
        if (intmem_usecount != 0) {
                printk(KERN_WARNING
                       "MMU: unbalanced memory request/release detected.\n"
                       "     intmem_usecount should be zero here; "
                       "forcing it to zero.\n");
                intmem_usecount = 0;
                omap_dsp_release_mem();
        }
}
EXPORT_SYMBOL_GPL(dsp_mem_usecount_clear);

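/*
 * The two hooks below form a refcounted pair: omap_dsp_request_mem()
 * is issued only on the 0 -> 1 transition of intmem_usecount and
 * omap_dsp_release_mem() only on 1 -> 0, so nested enable/disable
 * calls are safe as long as they stay balanced.  Addresses outside
 * the DSP internal memory range are rejected with -EIO.
 */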
static int omap1_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
{
        int ret = 0;

        if (omap_mmu_internal_memory(mmu, addr)) {
                if (intmem_usecount++ == 0)
                        ret = omap_dsp_request_mem();
        } else
                ret = -EIO;

        return ret;
}

static int omap1_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
{
        int ret = 0;

        if (omap_mmu_internal_memory(mmu, addr)) {
                if (--intmem_usecount == 0)
                        omap_dsp_release_mem();
        } else
                ret = -EIO;

        return ret;
}

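/*
 * TLB entries are accessed through a register window: writing
 * OMAP_MMU_LD_TLB_RD to OMAP_MMU_LD_TLB latches the entry selected by
 * the current TLB lock victim into the READ_CAM/READ_RAM registers,
 * while omap1_mmu_load_tlb() stages a new entry in the CAM/RAM
 * registers.  Victim selection and the actual load command are
 * presumably handled by the generic omap_mmu layer that calls these.
 */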
static inline void
omap1_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* read a TLB entry */
        omap_mmu_write_reg(mmu, OMAP_MMU_LD_TLB_RD, OMAP_MMU_LD_TLB);

        cr->cam_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_H);
        cr->cam_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_L);
        cr->ram_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_H);
        cr->ram_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_L);
}

static inline void
omap1_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam_h, OMAP_MMU_CAM_H);
        omap_mmu_write_reg(mmu, cr->cam_l, OMAP_MMU_CAM_L);
        omap_mmu_write_reg(mmu, cr->ram_h, OMAP_MMU_RAM_H);
        omap_mmu_write_reg(mmu, cr->ram_l, OMAP_MMU_RAM_L);
}

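/*
 * Illustrative dump from the show routine below, assembled from its
 * own format strings; the "lock base"/"victim" marker lines are
 * interleaved at their matching entry indices:
 *
 *   P: preserved, V: valid
 *   ety P V size   cam_va     ram_pa ap
 *   00: P V  4KB 0x300000 0x10171800 FA
 */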
static ssize_t omap1_mmu_show(struct omap_mmu *mmu, char *buf,
                              struct omap_mmu_tlb_lock *tlb_lock)
{
        int i, len;

        len = sprintf(buf, "P: preserved, V: valid\n"
                           "ety P V size   cam_va     ram_pa ap\n");
                         /* 00: P V  4KB 0x300000 0x10171800 FA */

        for (i = 0; i < mmu->nr_tlb_entries; i++) {
                struct omap_mmu_tlb_entry ent;
                struct cam_ram_regset cr;
                struct omap_mmu_tlb_lock entry_lock;
                char *pgsz_str, *ap_str;

                /* read a TLB entry */
                entry_lock.base   = tlb_lock->base;
                entry_lock.victim = i;
                omap_mmu_read_tlb(mmu, &entry_lock, &cr);

                ent.pgsz  = cr.cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
                ent.prsvd = cr.cam_l & OMAP_MMU_CAM_P;
                ent.valid = cr.cam_l & OMAP_MMU_CAM_V;
                ent.ap    = cr.ram_l & OMAP_MMU_RAM_L_AP_MASK;
                ent.va = (u32)(cr.cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
                ent.pa = (unsigned long)cr.ram_h << 16 |
                         (cr.ram_l & OMAP_MMU_RAM_L_RAM_LSB_MASK);

                pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB" :
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB" :
                                                                      " ???";
                ap_str = (ent.ap == OMAP_MMU_RAM_L_AP_RO) ? "RO" :
                         (ent.ap == OMAP_MMU_RAM_L_AP_FA) ? "FA" :
                         (ent.ap == OMAP_MMU_RAM_L_AP_NA) ? "NA" :
                                                            "??";

                if (i == tlb_lock->base)
                        len += sprintf(buf + len, "lock base = %d\n",
                                       tlb_lock->base);
                if (i == tlb_lock->victim)
                        len += sprintf(buf + len, "victim    = %d\n",
                                       tlb_lock->victim);
                len += sprintf(buf + len,
                               /* 00: P V  4KB 0x300000 0x10171800 FA */
                               "%02d: %c %c %s 0x%06lx 0x%08lx %s\n",
                               i,
                               ent.prsvd ? 'P' : ' ',
                               ent.valid ? 'V' : ' ',
                               pgsz_str, ent.va, ent.pa, ap_str);
        }

        return len;
}

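/*
 * One preserved (locked-down) TLB entry maps the DSP vector table
 * page at DSP address DSP_INIT_PAGE (0xfff000); the backing page is
 * allocated in omap1_mmu_startup() below.
 */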
static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
{
        int n = 0;

        exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);

        return n;
}

static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
{
        exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
}

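/*
 * Startup allocates a single DMA-capable page for the DSP vector
 * table and installs it as the preserved mapping above; shutdown
 * removes the mapping and flushes the ARM-side kernel mapping of the
 * page before freeing it.
 */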
static int omap1_mmu_startup(struct omap_mmu *mmu)
{
        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                printk(KERN_ERR "MMU: failed to allocate memory "
                                "for DSP vector table\n");
                return -ENOMEM;
        }

        mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);

        return 0;
}

static void omap1_mmu_shutdown(struct omap_mmu *mmu)
{
        exmap_clear_preserved_entries(mmu);

        if (dspvect_page != NULL) {
                unsigned long virt;

                down_read(&mmu->exmap_sem);

                virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&mmu->exmap_sem);
        }
}

static inline unsigned long omap1_mmu_cam_va(struct cam_ram_regset *cr)
{
        unsigned int page_size = cr->cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;

        return (u32)(cr->cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK)  << 22 |
               (u32)(cr->cam_l & get_cam_l_va_mask(page_size)) << 6;
}

static struct cam_ram_regset *
omap1_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
{
        struct cam_ram_regset *cr;

        if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
                printk(KERN_ERR "MMU: mapping vaddr (0x%06lx) is not aligned "
                       "to its page-size boundary\n", entry->va);
                return ERR_PTR(-EINVAL);
        }

        cr = kmalloc(sizeof(*cr), GFP_KERNEL);
        if (cr == NULL)
                return ERR_PTR(-ENOMEM);

        cr->cam_h = entry->va >> 22;
        cr->cam_l = (entry->va >> 6 & get_cam_l_va_mask(entry->pgsz)) |
                    entry->prsvd | entry->pgsz;
        cr->ram_h = entry->pa >> 16;
        cr->ram_l = (entry->pa & OMAP_MMU_RAM_L_RAM_LSB_MASK) | entry->ap;

        return cr;
}
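
/*
 * Worked example (field widths follow the shifts above): encoding a
 * 4KB entry for va 0x300000 -> pa 0x10171800 gives
 *
 *   cam_h = 0x300000 >> 22   = 0x0000
 *   cam_l = ((0x300000 >> 6) & get_cam_l_va_mask(pgsz)) | prsvd | pgsz
 *   ram_h = 0x10171800 >> 16 = 0x1017
 *   ram_l = (0x10171800 & OMAP_MMU_RAM_L_RAM_LSB_MASK) | ap
 *
 * which corresponds to the sample row quoted in omap1_mmu_show().
 */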

static inline int omap1_mmu_cam_ram_valid(struct cam_ram_regset *cr)
{
        return cr->cam_l & OMAP_MMU_CAM_V;
}

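/*
 * Top half of the fault path: latch the fault status and address,
 * ignore status bits that fall outside MMUFAULT_MASK (this covers the
 * OMAP1710 quirk described in the comment below), and otherwise record
 * the fault address and defer resolution to the irq_work handler.
 */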
static void omap1_mmu_interrupt(struct omap_mmu *mmu)
{
        unsigned long status;
        unsigned long adh, adl;
        unsigned long dp;
        unsigned long va;

        status = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_ST);
        adh = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_H);
        adl = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_L);
        dp = adh & OMAP_MMU_FAULT_AD_H_DP;      /* latched but currently unused */
        va = (((adh & OMAP_MMU_FAULT_AD_H_ADR_MASK) << 16) | adl);

        /* if the fault is masked, nothing to do */
        if ((status & MMUFAULT_MASK) == 0) {
                pr_debug("MMU interrupt, but ignoring.\n");
                /*
                 * Note: on OMAP1710, when the CACHE + DMA domain comes
                 * out of idle on the DSP side, an MMU interrupt occurs
                 * but MMU_FAULT_ST is not set.  In this case we just
                 * ignore the interrupt.
                 */
                if (status) {
                        pr_debug("%s%s%s%s\n",
                                 (status & OMAP_MMU_FAULT_ST_PREF) ?
                                 "  (prefetch err)" : "",
                                 (status & OMAP_MMU_FAULT_ST_PERM) ?
                                 "  (permission fault)" : "",
                                 (status & OMAP_MMU_FAULT_ST_TLB_MISS) ?
                                 "  (TLB miss)" : "",
                                 (status & OMAP_MMU_FAULT_ST_TRANS) ?
                                 "  (translation fault)" : "");
                        pr_debug("fault address = %#08lx\n", va);
                }
                enable_irq(mmu->irq);
                return;
        }

        pr_info("%s%s%s%s\n",
                (status & OMAP_MMU_FAULT_ST_PREF) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PREF) ?
                "  prefetch err" :
                "  (prefetch err)" :
                "",
                (status & OMAP_MMU_FAULT_ST_PERM) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PERM) ?
                "  permission fault" :
                "  (permission fault)" :
                "",
                (status & OMAP_MMU_FAULT_ST_TLB_MISS) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TLB_MISS) ?
                "  TLB miss" :
                "  (TLB miss)" :
                "",
                (status & OMAP_MMU_FAULT_ST_TRANS) ?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TRANS) ?
                "  translation fault" :
                "  (translation fault)" :
                "");
        pr_info("fault address = %#08lx\n", va);

        mmu->fault_address = va;
        schedule_work(&mmu->irq_work);
}

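/*
 * Hook table for this MMU flavour; the omap_mmu_* helpers used above
 * live in the generic OMAP MMU layer, which presumably dispatches
 * through these ops for the OMAP1-specific register layout.
 */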
struct omap_mmu_ops omap1_mmu_ops = {
        .startup        = omap1_mmu_startup,
        .shutdown       = omap1_mmu_shutdown,
        .mem_enable     = omap1_mmu_mem_enable,
        .mem_disable    = omap1_mmu_mem_disable,
        .read_tlb       = omap1_mmu_read_tlb,
        .load_tlb       = omap1_mmu_load_tlb,
        .show           = omap1_mmu_show,
        .cam_va         = omap1_mmu_cam_va,
        .cam_ram_alloc  = omap1_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap1_mmu_cam_ram_valid,
        .interrupt      = omap1_mmu_interrupt,
};
EXPORT_SYMBOL_GPL(omap1_mmu_ops);