/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>
#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;
/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
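
/* Each bit of the bitmap above covers one KPTE_BITMAP_CHUNK_SZ (256MB)
 * chunk, so the bit index for a physical address is just paddr >> 28;
 * at MAX_PHYS_ADDRESS = 2^42 that is 16384 bits, i.e. 2KB of bitmap.
 */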
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
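
/* Layout used by the helpers below: PG_arch_1 in page->flags marks a
 * page whose D-cache contents are dirty, and the cpu that dirtied it
 * is kept in bits [32, 32 + ilog2(roundup_pow_of_two(NR_CPUS))), so a
 * single word carries both the dirty bit and the owning cpu.
 */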
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}
unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
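
/* The TSB is a direct-mapped software cache of translations: the
 * entry is picked by hashing the virtual address (address >>
 * tsb_hash_shift, masked by the TSB size), and the stored tag is
 * address >> 22, which is what the TLB-miss fast path compares
 * against on a miss.
 */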
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;
/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}
/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}
static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
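
/* Each loop iteration above maps one 4MB (0x400000 byte) TTE, which
 * is why both the virtual address and the TTE data advance by
 * 0x400000 per kernel image mapping.
 */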
static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}
void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
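
/* A context value is (version | number): the low CTX_NR_BITS hold the
 * context number and the bits above them the generation.  When the
 * number space wraps, the version is bumped and the bitmap recycled,
 * forcing every mm through the version mis-match path in
 * mmu_context.h to reallocate its context.
 */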
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)
static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}
static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;

	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif
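
/* nid_range() returns the end of the longest prefix of [start, end)
 * that lies within a single node, storing that node's id in *nid; the
 * non-NUMA stub simply reports everything as node 0.
 */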
/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}
static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u64 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}
static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u64 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}
int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}
static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}
static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}
static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}
static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}
static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}
static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}
static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}
static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}
static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}
static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}
static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}
static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}
static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}
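
/* Only naturally aligned, fully populated 256MB chunks get their bit
 * set above; partial chunks are skipped and therefore keep using 4MB
 * pages via kern_linear_pte_xor[0].
 */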
static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}
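
/* With CONFIG_DEBUG_PAGEALLOC the linear area is backed by the real
 * page tables built above, so the large-page shortcut in the TLB-miss
 * handler is patched out: 0x01000000 is the sparc64 nop opcode.
 */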
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}
/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	};

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
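
/* Both descriptors built above are handed to the hypervisor in a
 * single sun4v_mmu_tsb_ctx0() call; see sun4v_ktsb_register() below.
 */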
void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}
/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		     ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);
	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}
	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	lmb_enforce_memory_limit(cmdline_memory_size);

	lmb_analyze();
	lmb_dump_all();
	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must setup the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();
	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}
	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}
int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}
2035 void free_initrd_mem(unsigned long start, unsigned long end)
2038 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
2039 for (; start < end; start += PAGE_SIZE) {
2040 struct page *p = virt_to_page(start);
2042 ClearPageReserved(p);
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP

#define VMEMMAP_CHUNK_SHIFT	22
#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];
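
/* vmemmap_table[] holds one TTE per VMEMMAP_CHUNK (4MB) slice of the
 * virtual memmap; vmemmap_populate() below fills in an entry the
 * first time a page struct block in that slice is needed.
 */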
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
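
/* protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED bits of vm_flags: entries 0x0-0x7 are the private
 * (copy-on-write) variants, 0x8-0xf the shared ones, and
 * page_exec_bit is masked out whenever VM_EXEC is clear.
 */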
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
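
/* The kern_linear_pte_xor[] values above are built so that the
 * TLB-miss handler can turn a linear-area virtual address into a TTE
 * with a single xor: since vaddr = PAGE_OFFSET + paddr, xor'ing with
 * (pte bits ^ PAGE_OFFSET) strips the offset and sets the protection
 * and size bits in one go.  Index 0 is the 4MB flavor, index 1 the
 * 256MB one, selected via kpte_linear_bitmap.
 */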
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		};
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		};
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}