]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - arch/arm/mach-omap1/mmu.c
DSP: Move code to use only one dsp_common.h
[linux-2.6-omap-h63xx.git] / arch / arm / mach-omap1 / mmu.c
1 /*
2  * linux/arch/arm/mach-omap1/mmu.c
3  *
4  * Support for non-MPU OMAP1 MMUs.
5  *
6  * Copyright (C) 2002-2005 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *        and Paul Mundt <paul.mundt@nokia.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24  */
25 #include <linux/types.h>
26 #include <linux/init.h>
27 #include <linux/rwsem.h>
28 #include <linux/device.h>
29 #include <linux/kernel.h>
30 #include <linux/mm.h>
31 #include <linux/interrupt.h>
32 #include <linux/err.h>
33 #include "mmu.h"
34 #include <asm/tlbflush.h>
35 #include <asm/arch/dsp.h>
36 #include <asm/arch/dsp_common.h>
37
/* Backing page for the DSP vector table; allocated in omap1_mmu_startup(). */
static void *dspvect_page;
/* DSP-side address at which the vector table page is kept mapped. */
#define DSP_INIT_PAGE	0xfff000

/*
 * Fault status bits that are acted upon in omap1_mmu_interrupt();
 * faults outside this mask are only logged and then ignored.
 */
#define MMUFAULT_MASK (OMAP_MMU_FAULT_ST_PERM |\
                       OMAP_MMU_FAULT_ST_TLB_MISS |\
                       OMAP_MMU_FAULT_ST_TRANS)
44
45 static unsigned int get_cam_l_va_mask(u16 pgsz)
46 {
47         switch (pgsz) {
48         case OMAP_MMU_CAM_PAGESIZE_1MB:
49                 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
50                        OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
51         case OMAP_MMU_CAM_PAGESIZE_64KB:
52                 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
53                        OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
54         case OMAP_MMU_CAM_PAGESIZE_4KB:
55                 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
56                        OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
57         case OMAP_MMU_CAM_PAGESIZE_1KB:
58                 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
59                        OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
60         }
61         return 0;
62 }
63
/*
 * Full 32-bit virtual-address tag mask for a page size: the CAM_H tag
 * covers bits 31..22, the CAM_L tag (size dependent) starts at bit 6.
 */
#define get_cam_va_mask(pgsz) \
        ((u32)OMAP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
         (u32)get_cam_l_va_mask(pgsz) << 6)

/* Reference count of enabled mappings into DSP internal memory. */
static int intmem_usecount;
69
70 /* for safety */
71 void dsp_mem_usecount_clear(void)
72 {
73         if (intmem_usecount != 0) {
74                 printk(KERN_WARNING
75                        "MMU: unbalanced memory request/release detected.\n"
76                        "         intmem_usecount is not zero at where "
77                        "it should be! ... fixed to be zero.\n");
78                 intmem_usecount = 0;
79                 omap_dsp_release_mem();
80         }
81 }
82 EXPORT_SYMBOL_GPL(dsp_mem_usecount_clear);
83
84 static int omap1_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
85 {
86         int ret = 0;
87
88         if (omap_mmu_internal_memory(mmu, addr)) {
89                 if (intmem_usecount++ == 0)
90                         ret = omap_dsp_request_mem();
91         }
92
93         return ret;
94 }
95
96 static int omap1_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
97 {
98         int ret = 0;
99
100         if (omap_mmu_internal_memory(mmu, addr)) {
101                 if (--intmem_usecount == 0)
102                         omap_dsp_release_mem();
103         } else
104                 ret = -EIO;
105
106         return ret;
107 }
108
/*
 * Read the TLB entry currently selected by the caller (via the lock
 * victim register) into @cr.  The LD_TLB write with the RD bit must
 * happen before the READ_CAM/READ_RAM registers are sampled.
 */
static inline void
omap1_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* read a TLB entry */
        omap_mmu_write_reg(mmu, OMAP_MMU_LD_TLB_RD, OMAP_MMU_LD_TLB);

        cr->cam_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_H);
        cr->cam_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_L);
        cr->ram_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_H);
        cr->ram_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_L);
}
120
/*
 * Program the CAM/RAM register pair in @cr into the TLB entry currently
 * selected by the lock/victim registers.
 */
static inline void
omap1_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
{
        /* Set the CAM and RAM entries */
        omap_mmu_write_reg(mmu, cr->cam_h, OMAP_MMU_CAM_H);
        omap_mmu_write_reg(mmu, cr->cam_l, OMAP_MMU_CAM_L);
        omap_mmu_write_reg(mmu, cr->ram_h, OMAP_MMU_RAM_H);
        omap_mmu_write_reg(mmu, cr->ram_l, OMAP_MMU_RAM_L);
}
130
/*
 * Format every TLB entry of @mmu into @buf (sysfs-style dump).  Each
 * entry is read back through the hardware read port, decoded, and
 * printed one per line; the lock base and current victim positions are
 * annotated inline.  Returns the number of bytes written to @buf.
 *
 * NOTE(review): output length is not bounded against the caller's
 * buffer here — presumably @buf is a PAGE_SIZE sysfs buffer; confirm
 * nr_tlb_entries keeps the dump within it.
 */
static ssize_t omap1_mmu_show(struct omap_mmu *mmu, char *buf,
                              struct omap_mmu_tlb_lock *tlb_lock)
{
        int i, len;

        len = sprintf(buf, "P: preserved, V: valid\n"
                           "ety P V size   cam_va     ram_pa ap\n");
                         /* 00: P V  4KB 0x300000 0x10171800 FA */

        for (i = 0; i < mmu->nr_tlb_entries; i++) {
                struct omap_mmu_tlb_entry ent;
                struct cam_ram_regset cr;
                struct omap_mmu_tlb_lock entry_lock;
                char *pgsz_str, *ap_str;

                /* read a TLB entry */
                entry_lock.base   = tlb_lock->base;
                entry_lock.victim = i;
                omap_mmu_read_tlb(mmu, &entry_lock, &cr);

                /* decode page size, flags and access permission bits */
                ent.pgsz  = cr.cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
                ent.prsvd = cr.cam_l & OMAP_MMU_CAM_P;
                ent.valid = cr.cam_l & OMAP_MMU_CAM_V;
                ent.ap    = cr.ram_l & OMAP_MMU_RAM_L_AP_MASK;
                /* reassemble VA from the high (bits 31..22) and low tags */
                ent.va = (u32)(cr.cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
                /* PA: high 16 bits in ram_h, masked low bits in ram_l */
                ent.pa = (unsigned long)cr.ram_h << 16 |
                         (cr.ram_l & OMAP_MMU_RAM_L_RAM_LSB_MASK);

                pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
                           (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1KB)  ? " 1KB":
                                                                     " ???";
                ap_str = (ent.ap == OMAP_MMU_RAM_L_AP_RO) ? "RO":
                         (ent.ap == OMAP_MMU_RAM_L_AP_FA) ? "FA":
                         (ent.ap == OMAP_MMU_RAM_L_AP_NA) ? "NA":
                                                           "??";

                /* mark the lock base / victim rows in the listing */
                if (i == tlb_lock->base)
                        len += sprintf(buf + len, "lock base = %d\n",
                                       tlb_lock->base);
                if (i == tlb_lock->victim)
                        len += sprintf(buf + len, "victim    = %d\n",
                                       tlb_lock->victim);
                len += sprintf(buf + len,
                               /* 00: P V  4KB 0x300000 0x10171800 FA */
                               "%02d: %c %c %s 0x%06lx 0x%08lx %s\n",
                               i,
                               ent.prsvd ? 'P' : ' ',
                               ent.valid ? 'V' : ' ',
                               pgsz_str, ent.va, ent.pa, ap_str);
        }

        return len;
}
187
188 static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
189 {
190         int n = 0;
191
192         exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
193
194         return n;
195 }
196
197 static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
198 {
199         exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
200 }
201
202 static int omap1_mmu_startup(struct omap_mmu *mmu)
203 {
204         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
205         if (dspvect_page == NULL) {
206                 dev_err(&mmu->dev, "MMU %s: failed to allocate memory "
207                         "for vector table\n", mmu->name);
208                 return -ENOMEM;
209         }
210
211         mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
212
213         return 0;
214 }
215
216 static void omap1_mmu_shutdown(struct omap_mmu *mmu)
217 {
218         exmap_clear_preserved_entries(mmu);
219
220         if (dspvect_page != NULL) {
221                 unsigned long virt;
222
223                 down_read(&mmu->exmap_sem);
224
225                 virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
226                 flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
227                 free_page((unsigned long)dspvect_page);
228                 dspvect_page = NULL;
229
230                 up_read(&mmu->exmap_sem);
231         }
232 }
233
234 static inline unsigned long omap1_mmu_cam_va(struct cam_ram_regset *cr)
235 {
236         unsigned int page_size = cr->cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
237
238         return (u32)(cr->cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK)  << 22 |
239                (u32)(cr->cam_l & get_cam_l_va_mask(page_size)) << 6;
240 }
241
242 static struct cam_ram_regset *
243 omap1_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
244 {
245         struct cam_ram_regset *cr;
246
247         if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
248                 dev_err(&mmu->dev, "MMU %s: mapping vadr (0x%06lx) is not on"
249                         " an aligned boundary\n", mmu->name, entry->va);
250                 return ERR_PTR(-EINVAL);
251         }
252
253         cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
254
255         cr->cam_h = entry->va >> 22;
256         cr->cam_l = (entry->va >> 6 & get_cam_l_va_mask(entry->pgsz)) |
257                    entry->prsvd | entry->pgsz;
258         cr->ram_h = entry->pa >> 16;
259         cr->ram_l = (entry->pa & OMAP_MMU_RAM_L_RAM_LSB_MASK) | entry->ap;
260
261         return cr;
262 }
263
264 static inline int omap1_mmu_cam_ram_valid(struct cam_ram_regset *cr)
265 {
266         return cr->cam_l & OMAP_MMU_CAM_V;
267 }
268
/*
 * MMU fault interrupt handler.  Reads the fault status and address
 * registers; faults outside MMUFAULT_MASK are only logged and the IRQ
 * is re-enabled (OMAP1710 spurious-interrupt workaround below), while
 * real faults record the address and defer handling to irq_work.
 */
static void omap1_mmu_interrupt(struct omap_mmu *mmu)
{
        unsigned long status;
        unsigned long adh, adl;
        unsigned long dp;   /* NOTE(review): read but otherwise unused here */
        unsigned long va;

        status = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_ST);
        adh = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_H);
        adl = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_L);
        dp = adh & OMAP_MMU_FAULT_AD_H_DP;
        /* fault VA: high bits from AD_H, low 16 bits from AD_L */
        va = (((adh & OMAP_MMU_FAULT_AD_H_ADR_MASK) << 16) | adl);

        /* if the fault is masked, nothing to do */
        if ((status & MMUFAULT_MASK) == 0) {
                pr_debug( "MMU interrupt, but ignoring.\n");
                /*
                 * note: in OMAP1710,
                 * when CACHE + DMA domain gets out of idle in DSP,
                 * MMU interrupt occurs but MMU_FAULT_ST is not set.
                 * in this case, we just ignore the interrupt.
                 */
                if (status) {
                        pr_debug( "%s%s%s%s\n",
                                  (status & OMAP_MMU_FAULT_ST_PREF)?
                                  "  (prefetch err)" : "",
                                  (status & OMAP_MMU_FAULT_ST_PERM)?
                                  "  (permission fault)" : "",
                                  (status & OMAP_MMU_FAULT_ST_TLB_MISS)?
                                  "  (TLB miss)" : "",
                                  (status & OMAP_MMU_FAULT_ST_TRANS) ?
                                  "  (translation fault)": "");
                        pr_debug( "fault address = %#08lx\n", va);
                }
                enable_irq(mmu->irq);
                return;
        }

        /*
         * For each fault bit: print it bare if it is in MMUFAULT_MASK
         * (i.e. we act on it), parenthesized if merely informational.
         * The inner (MMUFAULT_MASK & ...) ternaries are compile-time
         * constant selections.
         */
        pr_info("%s%s%s%s\n",
                (status & OMAP_MMU_FAULT_ST_PREF)?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PREF)?
                "  prefetch err":
                "  (prefetch err)":
                "",
                (status & OMAP_MMU_FAULT_ST_PERM)?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PERM)?
                "  permission fault":
                "  (permission fault)":
                "",
                (status & OMAP_MMU_FAULT_ST_TLB_MISS)?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TLB_MISS)?
                "  TLB miss":
                "  (TLB miss)":
                "",
                (status & OMAP_MMU_FAULT_ST_TRANS)?
                (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TRANS)?
                "  translation fault":
                "  (translation fault)":
                "");
        pr_info("fault address = %#08lx\n", va);

        /* defer actual fault handling to process context */
        mmu->fault_address = va;
        schedule_work(&mmu->irq_work);
}
333
334 static pgprot_t omap1_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
335 {
336         /* 4KB AP position as default */
337         u32 attr = entry->ap >> 4;
338         attr <<= ((entry->pgsz == OMAP_MMU_CAM_PAGESIZE_1MB) ? 6:0);
339         return attr;
340 }
341
/* OMAP1 implementation of the generic omap_mmu operations table. */
struct omap_mmu_ops omap1_mmu_ops = {
        .startup        = omap1_mmu_startup,
        .shutdown       = omap1_mmu_shutdown,
        .mem_enable     = omap1_mmu_mem_enable,
        .mem_disable    = omap1_mmu_mem_disable,
        .read_tlb       = omap1_mmu_read_tlb,
        .load_tlb       = omap1_mmu_load_tlb,
        .show           = omap1_mmu_show,
        .cam_va         = omap1_mmu_cam_va,
        .cam_ram_alloc  = omap1_mmu_cam_ram_alloc,
        .cam_ram_valid  = omap1_mmu_cam_ram_valid,
        .interrupt      = omap1_mmu_interrupt,
        .pte_get_attr   = omap1_mmu_pte_get_attr,
};
EXPORT_SYMBOL_GPL(omap1_mmu_ops);