/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
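/*
 * struct cachepolicy (its "policy" name field is shown below) ties each
 * "cachepolicy=" boot argument name to the section (.pmd) and page (.pte)
 * cacheability bits that implement it, plus the control register bits
 * (.cr_mask) that must be cleared for the policy to take effect.
 */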
	const char	policy[16];

static struct cachepolicy cache_policies[] __initdata = {
		.pmd	= PMD_SECT_UNCACHED,
		.pmd	= PMD_SECT_BUFFERED,
		.pte	= PTE_BUFFERABLE,
		.policy	= "writethrough",
		.policy	= "writeback",
		.pte	= PTE_BUFFERABLE|PTE_CACHEABLE,
		.policy	= "writealloc",
		.pte	= PTE_BUFFERABLE|PTE_CACHEABLE,
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;

	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");

	set_cr(cr_alignment);
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
	} else if (memcmp(*p, "off", 3) == 0) {

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
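/*
 * Example (illustrative only): booting with "cachepolicy=writethrough ecc=on"
 * selects the write-through entry of cache_policies[] and sets ecc_mask to
 * PMD_PROTECTION, so the ECC enable bit is ORed into first-level descriptors
 * on platforms that support it.
 */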
static int __init noalign_setup(char *__unused)
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);

__setup("noalign", noalign_setup);
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
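/*
 * pmd_off() returns the first-level (pmd) entry covering "virt" within the
 * given page directory; pmd_off_k() does the same against the kernel's
 * master page tables via pgd_offset_k().
 */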
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);

		new_pte = pte_alloc_map(mm, new_pmd, 0);

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);

	free_pages((unsigned long)new_pgd, 2);
void free_pgd_slow(pgd_t *pgd)
	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);

	pte = pmd_page(*pmd);
	dec_page_state(nr_page_table_pages);

	free_pages((unsigned long) pgd, 2);
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
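/*
 * Note that a Linux pgd entry covers two 1MB hardware section descriptors,
 * which is why section mappings here are created and advanced in steps of
 * PGDIR_SIZE / 2: when the virtual address selects the odd megabyte, the
 * second descriptor of the pair is the one written.
 */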
/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
	pmd_t *pmdp = pmd_off_k(virt);

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);

	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
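/*
 * The prot_pte/prot_l1/prot_sect/domain fields below belong to struct
 * mem_types: for every mapping type (MT_DEVICE, MT_MEMORY, MT_ROM, the
 * vectors pages, ...) they give the Linux pte bits, the first-level table
 * descriptor bits, the section descriptor bits and the ARM domain to use.
 * build_mem_type_table() below adjusts these defaults for the running CPU.
 */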
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;

static struct mem_types mem_types[] __initdata = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			     L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	[MT_IXP2000_DEVICE] = {	/* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
			     PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot;
	int cpu_arch = cpu_architecture();
#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif

	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;

	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
	cp = &cache_policies[cachepolicy];
	user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 */
		user_pgprot |= L_PTE_ASID;
	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;

	mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;

	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
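	/*
	 * With the default write-back policy and ECC left off, the line
	 * above prints:  "Memory policy: ECC disabled, Data cache writeback".
	 */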
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%016llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%016llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);

		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
		    & ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
			       "0x%016llx at 0x%08lx invalid alignment\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * off.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
			        (virt + off) & ~SUPERSECTION_MASK) &&
			       length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
	unsigned long base_pmdval;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
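	/*
	 * Illustration (assuming the usual two-level ARM PGDIR_SHIFT of 21):
	 * each pgd entry spans 2MB, so iteration i above writes two 1MB
	 * section descriptors, pmd[0] mapping virtual address i*2MB and
	 * pmd[1] mapping i*2MB + 1MB, both 1:1 to the same physical addresses.
	 */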
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
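/*
 * Illustrative sketch only (not part of the original file): a machine port
 * would typically describe its fixed I/O windows with a map_desc table and
 * pass it to iotable_init() from its ->map_io callback.  The field names
 * match those used by create_mapping() above; the addresses, the SZ_1M
 * constant and the __phys_to_pfn() helper are assumptions for illustration.
 */
#if 0
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xfe000000,			/* virtual address of the window */
		.pfn		= __phys_to_pfn(0x10000000),	/* physical base, as a page frame number */
		.length		= SZ_1M,			/* size of the mapping */
		.type		= MT_DEVICE,			/* device mapping type, see mem_types[MT_DEVICE] */
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
#endif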