[IA64] Minimize per_cpu reservations.
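
Besides the per_cpu sizing work named in the title, this blobdiff carries a few neighbouring changes that landed in the same range: the NR_CPUS loops become for_each_possible_early_cpu() iterations so early reservations only cover cpus that can actually appear, pgdat_list loses its static qualifier, compute_pernodesize() reserves one more cache-aligned pg_data_t per node, the reserve_bootmem_node() calls gain the BOOTMEM_DEFAULT flag, per_cpu_init() drops its boot-cpu shortcut, and a vmemmap_populate() stub is added for CONFIG_SPARSEMEM_VMEMMAP.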
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 0d34585058c8d0044eb97af1370543e34afb2167..6136a4c6df11dddf90560eaf13f93537216685f3 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -48,7 +48,7 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
-static pg_data_t *pgdat_list[MAX_NUMNODES];
+pg_data_t *pgdat_list[MAX_NUMNODES];
 
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
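
Dropping static here implies a matching extern declaration in the ia64 headers so other NUMA code can reach the per-node pg_data_t pointers. A minimal sketch of that declaration; the header hunk is not part of this file's diff, and the exact location is an assumption:

        /* Sketch: assumed companion declaration, e.g. in include/asm-ia64/nodedata.h */
        extern pg_data_t *pgdat_list[MAX_NUMNODES];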
@@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
 
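Here and in the hunks below, scanning all NR_CPUS slots is replaced by iterating only the cpus reported as possible during early boot. The iterator comes from the companion change to the ia64 smp.h header, not shown in this file's diff; from memory, it looks roughly like this:

        /* Sketch of the assumed companion definition in include/asm-ia64/smp.h:
         * walk only the cpus marked possible at early boot. */
        extern cpumask_t early_cpu_possible_map;

        #define for_each_possible_early_cpu(cpu) \
                for_each_cpu_mask((cpu), early_cpu_possible_map)
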
@@ -124,6 +124,7 @@ static unsigned long __meminit compute_pernodesize(int node)
        pernodesize += node * L1_CACHE_BYTES;
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+       pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize = PAGE_ALIGN(pernodesize);
        return pernodesize;
 }
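
The added line reserves one more cache-aligned pg_data_t in each node's per-node area. For orientation, the function now sizes that area along these lines (a sketch; the lines above this hunk's context are reconstructed from the surrounding source, not shown in the diff):

        /* Sketch: per-node area sizing as of this change. */
        cpus = early_nr_cpus_node(node);                /* possible cpus on this node */
        pernodesize += PERCPU_PAGE_SIZE * cpus;         /* per-cpu data */
        pernodesize += node * L1_CACHE_BYTES;           /* stagger to avoid cache aliasing */
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));   /* extra pg_data_t added here */
        pernodesize = PAGE_ALIGN(pernodesize);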
@@ -142,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                if (node == node_cpuid[cpu].nid) {
                        memcpy(__va(cpu_data), __phys_per_cpu_start,
                               __per_cpu_end - __per_cpu_start);
@@ -299,12 +300,12 @@ static void __init reserve_pernode_space(void)
                pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
 
                /* Now the per-node space */
                size = mem_data[node].pernode_size;
                base = __pa(mem_data[node].pernode_addr);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
        }
 }
 
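The fourth argument tracks the bootmem allocator growing a flags parameter in this kernel era: BOOTMEM_DEFAULT preserves the old behaviour of tolerating overlapping reservations, while BOOTMEM_EXCLUSIVE makes an overlapping reservation fail instead. The flag values these calls assume, from include/linux/bootmem.h:

        /* Sketch: bootmem reservation flags assumed by this hunk. */
        #define BOOTMEM_DEFAULT         0
        #define BOOTMEM_EXCLUSIVE       (1 << 0)
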
@@ -345,7 +346,7 @@ static void __init initialize_pernode_data(void)
 
 #ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
@@ -493,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
        int cpu;
        static int first_time = 1;
 
-
-       if (smp_processor_id() != 0)
-               return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
        if (first_time) {
                first_time = 0;
-               for (cpu = 0; cpu < NR_CPUS; cpu++)
+               for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }
 
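With the early return gone, every cpu, boot cpu included, falls through to the common tail of per_cpu_init(). That tail sits outside this hunk's context, but it amounts to returning the caller's own per-cpu base:

        /* Sketch: the function tail this hunk now falls through to (not shown). */
        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
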
@@ -715,3 +712,11 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
        scatter_node_data();
 }
 #endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(struct page *start_page,
+                                               unsigned long size, int node)
+{
+       return vmemmap_populate_basepages(start_page, size, node);
+}
+#endif
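
The new stub wires ia64 into the generic virtual memmap code: vmemmap_populate_basepages() in mm/sparse-vmemmap.c maps the requested stretch of the virtual struct-page array using base pages allocated node-locally. Its prototype at this point, for reference (a sketch, not part of this diff):

        /* Sketch: generic helper the stub delegates to. */
        int vmemmap_populate_basepages(struct page *start_page,
                                       unsigned long size, int node);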