powerpc: Fix bootmem reservation on uninitialized node

Mark lmb reserved regions per node, via a new mark_reserved_regions_for_nid() helper called as each node's bootmem is initialized, instead of in a single pass over all nodes afterwards. A region is only reserved on the node currently being set up, since later nodes may not yet have a valid NODE_DATA().
[linux-2.6-omap-h63xx.git] / arch / powerpc / mm / numa.c
index eb505ad34a857421b2deb85bbedd7b4c6ba99777..cf81049e1e51a0ce98d0ebdf9b7a74d5901de211 100644
@@ -865,10 +865,77 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
        .priority = 1 /* Must run before sched domains notifier. */
 };
 
+static void mark_reserved_regions_for_nid(int nid)
+{
+       struct pglist_data *node = NODE_DATA(nid);
+       int i;
+
+       for (i = 0; i < lmb.reserved.cnt; i++) {
+               unsigned long physbase = lmb.reserved.region[i].base;
+               unsigned long size = lmb.reserved.region[i].size;
+               unsigned long start_pfn = physbase >> PAGE_SHIFT;
+               unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+               struct node_active_region node_ar;
+               unsigned long node_end_pfn = node->node_start_pfn +
+                                            node->node_spanned_pages;
+
+               /*
+                * Check to make sure that this lmb.reserved area is
+                * within the bounds of the node that we care about.
+                * Checking the nid of the start and end points is not
+                * sufficient because the reserved area could span the
+                * entire node.
+                */
+               if (end_pfn <= node->node_start_pfn ||
+                   start_pfn >= node_end_pfn)
+                       continue;
+
+               get_node_active_region(start_pfn, &node_ar);
+               while (start_pfn < end_pfn &&
+                       node_ar.start_pfn < node_ar.end_pfn) {
+                       unsigned long reserve_size = size;
+                       /*
+                        * if reserved region extends past active region
+                        * then trim size to active region
+                        */
+                       if (end_pfn > node_ar.end_pfn)
+                               reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+                                       - (start_pfn << PAGE_SHIFT);
+                       /*
+                        * Only worry about *this* node, others may not
+                        * yet have valid NODE_DATA().
+                        */
+                       if (node_ar.nid == nid) {
+                               dbg("reserve_bootmem %lx %lx nid=%d\n",
+                                       physbase, reserve_size, node_ar.nid);
+                               reserve_bootmem_node(NODE_DATA(node_ar.nid),
+                                               physbase, reserve_size,
+                                               BOOTMEM_DEFAULT);
+                       }
+                       /*
+                        * if reserved region is contained in the active region
+                        * then done.
+                        */
+                       if (end_pfn <= node_ar.end_pfn)
+                               break;
+
+                       /*
+                        * reserved region extends past the active region
+                        *   get next active region that contains this
+                        *   reserved region
+                        */
+                       start_pfn = node_ar.end_pfn;
+                       physbase = start_pfn << PAGE_SHIFT;
+                       size = size - reserve_size;
+                       get_node_active_region(start_pfn, &node_ar);
+               }
+       }
+}
+
+
 void __init do_init_bootmem(void)
 {
        int nid;
-       unsigned int i;
 
        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -890,7 +957,13 @@ void __init do_init_bootmem(void)
 
                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
-               /* Allocate the node structure node local if possible */
+               /*
+                * Allocate the node structure node local if possible
+                *
+                * Be careful moving this around, as it relies on all
+                * previous nodes' bootmem to be initialized and have
+                * all reserved areas marked.
+                */
                NODE_DATA(nid) = careful_allocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);
@@ -922,53 +995,14 @@ void __init do_init_bootmem(void)
                                  start_pfn, end_pfn);
 
                free_bootmem_with_active_regions(nid, end_pfn);
-       }
-
-       /* Mark reserved regions */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long physbase = lmb.reserved.region[i].base;
-               unsigned long size = lmb.reserved.region[i].size;
-               unsigned long start_pfn = physbase >> PAGE_SHIFT;
-               unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
-               struct node_active_region node_ar;
-
-               get_node_active_region(start_pfn, &node_ar);
-               while (start_pfn < end_pfn &&
-                       node_ar.start_pfn < node_ar.end_pfn) {
-                       unsigned long reserve_size = size;
-                       /*
-                        * if reserved region extends past active region
-                        * then trim size to active region
-                        */
-                       if (end_pfn > node_ar.end_pfn)
-                               reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-                                       - (start_pfn << PAGE_SHIFT);
-                       dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-                               reserve_size, node_ar.nid);
-                       reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-                                               reserve_size, BOOTMEM_DEFAULT);
-                       /*
-                        * if reserved region is contained in the active region
-                        * then done.
-                        */
-                       if (end_pfn <= node_ar.end_pfn)
-                               break;
-
-                       /*
-                        * reserved region extends past the active region
-                        *   get next active region that contains this
-                        *   reserved region
-                        */
-                       start_pfn = node_ar.end_pfn;
-                       physbase = start_pfn << PAGE_SHIFT;
-                       size = size - reserve_size;
-                       get_node_active_region(start_pfn, &node_ar);
-               }
-
-       }
-
-       for_each_online_node(nid)
+               /*
+                * Be very careful about moving this around.  Future
+                * calls to careful_allocation() depend on this getting
+                * done correctly.
+                */
+               mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
+       }
 }
 
 void __init paging_init(void)
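
For reference, below is a minimal, self-contained C sketch of the splitting loop in mark_reserved_regions_for_nid() above. The active-region table, the PAGE_SHIFT value, the reserved range, and the get_active_region() helper are illustrative stand-ins invented for this example (the kernel uses get_node_active_region() and the lmb reserved list); only the loop body mirrors the patch.

/*
 * Userspace sketch of the reserved-region splitting logic in
 * mark_reserved_regions_for_nid().  All data here is mocked:
 * the page size, the active regions, and the reserved range
 * are example values, not kernel data.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4K pages for the example */

struct active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

/* Two contiguous active regions belonging to two different nodes. */
static const struct active_region regions[] = {
	{ .start_pfn = 0x000, .end_pfn = 0x100, .nid = 0 },
	{ .start_pfn = 0x100, .end_pfn = 0x300, .nid = 1 },
};

/* Stand-in for get_node_active_region(): return the active region
 * that start_pfn falls in; an empty region terminates the loop. */
static struct active_region get_active_region(unsigned long start_pfn)
{
	size_t i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (start_pfn >= regions[i].start_pfn &&
		    start_pfn < regions[i].end_pfn)
			return regions[i];
	return (struct active_region){ 0, 0, -1 };
}

int main(void)
{
	/* One mocked reserved range crossing the node 0 / node 1 boundary. */
	unsigned long physbase  = 0x0c0UL << PAGE_SHIFT;
	unsigned long size      = 0x140UL << PAGE_SHIFT;
	unsigned long start_pfn = physbase >> PAGE_SHIFT;
	unsigned long end_pfn   = (physbase + size) >> PAGE_SHIFT;
	struct active_region ar = get_active_region(start_pfn);

	while (start_pfn < end_pfn && ar.start_pfn < ar.end_pfn) {
		unsigned long reserve_size = size;

		/* Trim the chunk to the end of the current active region. */
		if (end_pfn > ar.end_pfn)
			reserve_size = (ar.end_pfn << PAGE_SHIFT) -
				       (start_pfn << PAGE_SHIFT);

		/* Stands in for reserve_bootmem_node() on ar.nid. */
		printf("reserve 0x%lx size 0x%lx on nid %d\n",
		       physbase, reserve_size, ar.nid);

		/* Done once the remainder fits in this active region. */
		if (end_pfn <= ar.end_pfn)
			break;

		/* Otherwise carry the remainder into the next active region. */
		start_pfn = ar.end_pfn;
		physbase = start_pfn << PAGE_SHIFT;
		size -= reserve_size;
		ar = get_active_region(start_pfn);
	}
	return 0;
}

Run against the mocked layout, this prints one trimmed reservation per node (0x40000 bytes on nid 0, then the remaining 0x100000 bytes on nid 1), which is exactly why the patch has each node mark only its own chunk: a reservation that crosses a node boundary must be handed to reserve_bootmem_node() once per node, and only for nodes whose NODE_DATA() has already been set up.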