                unsigned long v = ~map[i / BITS_PER_LONG];
 
                if (gofast && v == ~0UL) {
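+                       /*
+                        * every page in this word of the bootmem map is free:
+                        * release all BITS_PER_LONG of them as a single block
+                        */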
-                       int j, order;
+                       int order;
 
                        page = pfn_to_page(pfn);
                        count += BITS_PER_LONG;
-                       __ClearPageReserved(page);
                        order = ffs(BITS_PER_LONG) - 1;
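+                       /* order is such that 1 << order == BITS_PER_LONG */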
-                       set_page_refs(page, order);
-                       for (j = 1; j < BITS_PER_LONG; j++) {
-                               if (j + 16 < BITS_PER_LONG)
-                                       prefetchw(page + j + 16);
-                               __ClearPageReserved(page + j);
-                               set_page_count(page + j, 0);
-                       }
-                       __free_pages(page, order);
+                       __free_pages_bootmem(page, order);
                        i += BITS_PER_LONG;
                        page += BITS_PER_LONG;
                } else if (v) {
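+                       /* at least one page in this word is free: release the free ones singly */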
                        for (m = 1; m && i < idx; m<<=1, page++, i++) {
                                if (v & m) {
                                        count++;
-                                       __ClearPageReserved(page);
-                                       set_page_refs(page, 0);
-                                       __free_page(page);
+                                       __free_pages_bootmem(page, 0);
                                }
                        }
                } else {

        count = 0;
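+       /* now free the pages occupied by the bootmem bitmap itself */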
        for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
                count++;
-               __ClearPageReserved(page);
-               set_page_count(page, 1);
-               __free_page(page);
+               __free_pages_bootmem(page, 0);
        }
        total += count;
        bdata->node_bootmem_map = NULL;
 
 unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 
+static void fastcall free_hot_cold_page(struct page *page, int cold);
+
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
  *     1G machine -> (16M dma, 800M-16M normal, 1G-800M high)

        local_irq_restore(flags);
 }
 
+/*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+{
+       if (order == 0) {
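+               /* a solitary page goes straight onto the per-CPU hot list */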
+               __ClearPageReserved(page);
+               set_page_count(page, 0);
+
+               free_hot_cold_page(page, 0);
+       } else {
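+               /*
+                * the sole high-order caller passes blocks of BITS_PER_LONG
+                * pages (order == ffs(BITS_PER_LONG) - 1); clear the reserved
+                * bit and zero the refcount on each page before freeing
+                */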
+               LIST_HEAD(list);
+               int loop;
+
+               for (loop = 0; loop < BITS_PER_LONG; loop++) {
+                       struct page *p = &page[loop];
+
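+                       /* prefetch the struct page 16 entries ahead */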
+                       if (loop + 16 < BITS_PER_LONG)
+                               prefetchw(p + 16);
+                       __ClearPageReserved(p);
+                       set_page_count(p, 0);
+               }
+
+               arch_free_page(page, order);
+
+               mod_page_state(pgfree, 1 << order);
+
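+               /*
+                * pass the block to free_pages_bulk() as a one-entry list,
+                * bypassing the page validation done on the normal
+                * __free_pages() path
+                */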
+               list_add(&page->lru, &list);
+               kernel_map_pages(page, 1 << order, 0);
+               free_pages_bulk(page_zone(page), 1, &list, order);
+       }
+}
+
 
 /*
  * The order of subdivision here is critical for the IO subsystem.

 /*
  * Free a 0-order page
  */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
 static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);