[IA64] Minimize per_cpu reservations.
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 0dbf0e81f8c00a4a42442f8b58a1a2c4fb35dda6..6136a4c6df11dddf90560eaf13f93537216685f3 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -16,6 +16,7 @@
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
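
The new include is for touch_nmi_watchdog(), which the show_mem() hunk further
down begins calling. linux/nmi.h declares it (or stubs it out to
touch_softlockup_watchdog() on architectures without an NMI watchdog); the
relevant prototype is simply:

        /* from linux/nmi.h */
        extern void touch_nmi_watchdog(void);
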
@@ -47,7 +48,7 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
-static pg_data_t *pgdat_list[MAX_NUMNODES];
+pg_data_t *pgdat_list[MAX_NUMNODES];
 
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
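
Dropping "static" exposes pgdat_list[] beyond this file, which fits the
node-hotplug plumbing visible at the end of this diff
(arch_refresh_nodedata()/scatter_node_data()): other code now needs to read
and refresh the per-node pgdat pointers. The matching declaration presumably
lives in an ia64 header; a sketch of the assumed extern (the header placement
is my assumption, not shown in this blobdiff):

        /* sketch: assumed matching declaration in an asm-ia64 header */
        extern pg_data_t *pgdat_list[MAX_NUMNODES];
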
@@ -103,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
 
@@ -123,6 +124,7 @@ static unsigned long __meminit compute_pernodesize(int node)
        pernodesize += node * L1_CACHE_BYTES;
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+       pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize = PAGE_ALIGN(pernodesize);
        return pernodesize;
 }
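
The added line is not a stray duplicate of the pg_data_t term two lines above
it: the per-node area now holds room for a second, cache-aligned pg_data_t.
Read together with pgdat_list[] going global and arch_refresh_nodedata() at
the end of this file, the spare copy is plausibly scratch space for node
memory hotplug (my interpretation; the diff itself does not say). The
per-node layout this function sizes then looks roughly like:

        /*
         * sketch of the per-node area computed above:
         *   cpus * PERCPU_PAGE_SIZE          per-cpu data, per possible cpu
         *   node * L1_CACHE_BYTES            pad to stagger nodes in the cache
         *   L1_CACHE_ALIGN(pg_data_t)        the node's pgdat
         *   L1_CACHE_ALIGN(ia64_node_data)   node-local data
         *   L1_CACHE_ALIGN(pg_data_t)        second pgdat (this change)
         */
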
@@ -141,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                if (node == node_cpuid[cpu].nid) {
                        memcpy(__va(cpu_data), __phys_per_cpu_start,
                               __per_cpu_end - __per_cpu_start);
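
The same conversion, applied to the loop that stamps one copy of the per-cpu
template into the node's reserved area for each of its CPUs. Because
early_nr_cpus_node() switched to the same iterator, the space computed and the
copies made now cover the identical, smaller set of CPUs. As a rough
illustration (sizes quoted from memory): with ia64's 64KB PERCPU_PAGE_SIZE,
every NR_CPUS slot that no longer counts toward a node saves 64KB of
node-local memory.
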
@@ -298,12 +300,12 @@ static void __init reserve_pernode_space(void)
                pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
 
                /* Now the per-node space */
                size = mem_data[node].pernode_size;
                base = __pa(mem_data[node].pernode_addr);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
        }
 }
 
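
reserve_bootmem_node() grew a flags argument in this same kernel cycle;
BOOTMEM_DEFAULT preserves the old behaviour, while BOOTMEM_EXCLUSIVE makes a
reservation fail if the range is already reserved. A sketch of the era's
interface, quoted from memory from linux/bootmem.h:

        #define BOOTMEM_DEFAULT         0
        #define BOOTMEM_EXCLUSIVE       (1 << 0)
        extern int reserve_bootmem_node(pg_data_t *pgdat,
                                        unsigned long physaddr,
                                        unsigned long size,
                                        int flags);

Both call sites here keep the old always-succeed semantics, so
BOOTMEM_DEFAULT is the mechanical choice.
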
@@ -344,7 +346,7 @@ static void __init initialize_pernode_data(void)
 
 #ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
@@ -492,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
        int cpu;
        static int first_time = 1;
 
-
-       if (smp_processor_id() != 0)
-               return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
        if (first_time) {
                first_time = 0;
-               for (cpu = 0; cpu < NR_CPUS; cpu++)
+               for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }
 
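
Before this change every CPU other than the boot CPU returned early, so the
first_time block was boot-CPU-only by two mechanisms at once. With the early
return gone, all CPUs fall through to the common return in the function's
tail (which this hunk cuts off), and the one-shot block is guarded by the
static flag alone. A sketch of the resulting shape, with the tail
reconstructed from memory:

        /* sketch of per_cpu_init() after this hunk; tail quoted from memory */
        void __cpuinit *per_cpu_init(void)
        {
                int cpu;
                static int first_time = 1;

                if (first_time) {
                        first_time = 0;
                        for_each_possible_early_cpu(cpu)
                                per_cpu(local_per_cpu_offset, cpu) =
                                        __per_cpu_offset[cpu];
                }
                return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
        }
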
@@ -533,6 +531,8 @@ void show_mem(void)
                present = pgdat->node_present_pages;
                for(i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page;
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+                               touch_nmi_watchdog();
                        if (pfn_valid(pgdat->node_start_pfn + i))
                                page = pfn_to_page(pgdat->node_start_pfn + i);
                        else {
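
show_mem() walks every pfn a node spans, and on machines with very large or
sparsely populated nodes that loop can run long enough to trip the
NMI/soft-lockup watchdog; petting the watchdog once per MAX_ORDER block keeps
the check cheap while staying well inside the timeout. For reference, the
stride is defined in linux/mmzone.h as:

        #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
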
@@ -712,3 +712,11 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
        scatter_node_data();
 }
 #endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(struct page *start_page,
+                                               unsigned long size, int node)
+{
+       return vmemmap_populate_basepages(start_page, size, node);
+}
+#endif
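
With CONFIG_SPARSEMEM_VMEMMAP, each architecture must supply
vmemmap_populate() to map the virtual memmap covering a memory section; ia64
simply delegates to the generic base-page implementation, which maps the
memmap with ordinary PAGE_SIZE pages (architectures such as x86_64 instead
override this with large mappings where possible). The generic helper's
signature in this era's mm/sparse-vmemmap.c, quoted from memory:

        int __meminit vmemmap_populate_basepages(struct page *start_page,
                                                 unsigned long size, int node);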