/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = 0,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = PTE_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = PTE_CACHEABLE,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache, or the cache and
 * write buffer, to be turned off.  (Note: the write
 * buffer should not be enabled while the cache is off.)
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        flush_cache_all();
        set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

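/*
 * Parse the "ecc=" early parameter: "on" selects the first level
 * protection bit (PMD_PROTECTION), which build_mem_type_table() merges
 * into the kernel memory mappings; "off" clears it.
 */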
static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

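/*
 * Handle the "noalign" kernel parameter: clear the alignment fault
 * enable bit (CR_A) in both control register images and write the
 * result back to the hardware.
 */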
static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

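/*
 * Return the pmd entry covering the given virtual address within the
 * supplied pgd; pmd_off_k() does the same against the kernel page tables.
 */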
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
        return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
        return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * The ARM first-level page table is 16K, so we need to allocate four
 * pages (order 2) for a new level 1 table.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
                       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        if (!vectors_high()) {
                /*
                 * On ARM, the first page must always be allocated since
                 * it contains the machine vectors.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(new_pmd);
no_pmd:
        free_pages((unsigned long)new_pgd, 2);
no_pgd:
        return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        struct page *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = pmd_off(pgd, 0);
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
        pte_free(pte);
        pmd_free(pmd);
free:
        free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
        pmd_t *pmdp = pmd_off_k(virt);

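        /*
         * Each pgd entry covers 2MB but is backed by two 1MB hardware
         * section entries; step to the second entry when mapping the
         * odd megabyte.
         */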
        if (virt & (1 << 20))
                pmdp++;

        *pmdp = __pmd(phys | prot);
        flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
        int i;

        for (i = 0; i < 16; i += 1) {
                alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

                virt += (PGDIR_SIZE / 2);
        }
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
        pmd_t *pmdp = pmd_off_k(virt);
        pte_t *ptep;

        if (pmd_none(*pmdp)) {
                ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                               sizeof(pte_t));

                __pmd_populate(pmdp, __pa(ptep) | prot_l1);
        }
        ptep = pte_offset_kernel(pmdp, virt);

        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

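/*
 * Architecture memory type table: for each MT_* mapping type, the PTE,
 * first level and section protection bits to use, and the domain the
 * mapping belongs to.  build_mem_type_table() adjusts these entries for
 * the CPU architecture and the selected cache policy.
 */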
struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
        unsigned int    prot_sect;
        unsigned int    domain;
};

static struct mem_types mem_types[] __initdata = {
        [MT_DEVICE] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        }
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        unsigned int user_pgprot;
        int cpu_arch = cpu_architecture();
        int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
        if (cachepolicy > CPOLICY_BUFFERED)
                cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
        if (cachepolicy > CPOLICY_WRITETHROUGH)
                cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }

        if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        cp = &cache_policies[cachepolicy];
        user_pgprot = cp->pte;

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * bit 4 becomes XN which we must clear for the
                 * kernel memory mapping.
                 */
                mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
                mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

                /*
                 * Mark the device area as "shared device"
                 */
                mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
                mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

                /*
                 * User pages need to be mapped with the ASID
                 * (iow, non-global)
                 */
                user_pgprot |= L_PTE_ASID;
        }

        if (cpu_arch >= CPU_ARCH_ARMv5) {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
        } else {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
        }

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
                protection_map[i] = __pgprot(v);
        }

        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | cp->pte);

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);
}

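/* Base of the exception vectors: 0xffff0000 with high vectors, 0 otherwise. */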
#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
        unsigned long virt, length;
        int prot_sect, prot_l1, domain;
        pgprot_t prot_pte;
        unsigned long off = (u32)__pfn_to_phys(md->pfn);

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%016llx at 0x%08lx in user region\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
        }

        domain    = mem_types[md->type].domain;
        prot_pte  = __pgprot(mem_types[md->type].prot_pte);
        prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
        prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

        /*
         * Catch 36-bit addresses
         */
        if (md->pfn >= 0x100000) {
                if (domain) {
                        printk(KERN_ERR "MM: invalid domain in supersection "
                                "mapping for 0x%016llx at 0x%08lx\n",
                                __pfn_to_phys((u64)md->pfn), md->virtual);
                        return;
                }
                if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
                        & ~SUPERSECTION_MASK) {
                        printk(KERN_ERR "MM: cannot create mapping for "
                                "0x%016llx at 0x%08lx invalid alignment\n",
                                __pfn_to_phys((u64)md->pfn), md->virtual);
                        return;
                }

                /*
                 * Shift bits [35:32] of address into bits [23:20] of PMD
                 * (See ARMv6 spec).
                 */
                off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
        }

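        /*
         * Turn 'off' into the physical-virtual offset so that
         * (virt + off) yields the physical address in the loops below.
         */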
        virt   = md->virtual;
        off   -= virt;
        length = md->length;

        if (mem_types[md->type].prot_l1 == 0 &&
            (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       __pfn_to_phys(md->pfn), md->virtual);
                return;
        }

        while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
                /*
                 * Align to supersection boundary if !high pages.
                 * High pages have already been checked for proper
                 * alignment above and they will fail the SUPERSECTION_MASK
                 * check because of the way the address is encoded into
                 * offset.
                 */
                if (md->pfn <= 0x100000) {
                        while ((virt & ~SUPERSECTION_MASK ||
                                (virt + off) & ~SUPERSECTION_MASK) &&
                                length >= (PGDIR_SIZE / 2)) {
                                alloc_init_section(virt, virt + off, prot_sect);

                                virt   += (PGDIR_SIZE / 2);
                                length -= (PGDIR_SIZE / 2);
                        }
                }

                while (length >= SUPERSECTION_SIZE) {
                        alloc_init_supersection(virt, virt + off, prot_sect);

                        virt   += SUPERSECTION_SIZE;
                        length -= SUPERSECTION_SIZE;
                }
        }

        /*
         * A section mapping covers half a "pgdir" entry.
         */
        while (length >= (PGDIR_SIZE / 2)) {
                alloc_init_section(virt, virt + off, prot_sect);

                virt   += (PGDIR_SIZE / 2);
                length -= (PGDIR_SIZE / 2);
        }

        while (length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This then ensures that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long base_pmdval;
        pgd_t *pgd;
        int i;

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
                base_pmdval |= PMD_BIT4;

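        /*
         * Replace the user-space first level entries with flat 1:1 section
         * mappings; each pgd entry is backed by two 1MB hardware sections.
         */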
        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}

/*
 * Create the architecture specific mappings
 */
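/*
 * Machine support code typically describes its fixed I/O windows with a
 * map_desc array and hands it to iotable_init() from its map_io callback,
 * along these lines (the BOARD_IO_* names are purely illustrative):
 *
 *      static struct map_desc board_io_desc[] __initdata = {
 *              {
 *                      .virtual = BOARD_IO_VIRT,
 *                      .pfn     = __phys_to_pfn(BOARD_IO_PHYS),
 *                      .length  = BOARD_IO_SIZE,
 *                      .type    = MT_DEVICE,
 *              },
 *      };
 *      iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 */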
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}