setup_per_zone_pages_min(): take zone->lock instead of zone->lru_lock
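This blobdiff spans more than the one-line change named in the subject: it also carries unevictable-LRU bits (PG_mlocked handling on page free/alloc and the extra "unevictable" counters in show_free_areas()). For the change in the subject, the final hunks switch setup_per_zone_pages_min() from zone->lru_lock to zone->lock, presumably because the function also calls setup_zone_migrate_reserve(), which moves pages between the buddy free lists, and those lists are guarded by zone->lock rather than the LRU lock. Below is a minimal userspace sketch of that locking rule only; the fake_* names, fields and pthread mutexes are illustrative stand-ins, not kernel API.

/*
 * Illustrative userspace sketch, not kernel code: the struct, field names
 * and pthread locks are stand-ins. The point is only that the watermark
 * update must take the lock that guards the data it (indirectly) writes,
 * which for the free lists is zone->lock. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_zone {
	pthread_mutex_t lock;		/* stands in for zone->lock (buddy free lists) */
	pthread_mutex_t lru_lock;	/* stands in for zone->lru_lock (LRU lists only) */
	unsigned long pages_min;
	unsigned long nr_free;		/* data reached via the migrate-reserve path */
};

/* Stand-in for setup_zone_migrate_reserve(): touches free-list data,
 * so the caller must already hold z->lock. */
static void fake_migrate_reserve(struct fake_zone *z)
{
	z->nr_free -= 1;
}

/* Stand-in for setup_per_zone_pages_min(): takes z->lock, not z->lru_lock,
 * because fake_migrate_reserve() modifies data that z->lock protects. */
static void fake_setup_pages_min(struct fake_zone *z, unsigned long min)
{
	pthread_mutex_lock(&z->lock);
	z->pages_min = min;
	fake_migrate_reserve(z);
	pthread_mutex_unlock(&z->lock);
}

int main(void)
{
	struct fake_zone z = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.lru_lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_free = 1024,
	};

	fake_setup_pages_min(&z, 32);
	printf("pages_min=%lu nr_free=%lu\n", z.pages_min, z.nr_free);
	return 0;
}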
[linux-2.6-omap-h63xx.git] / mm / page_alloc.c
index 79c0981b1d32c9c11504a0caa5021c9b94be8d1e..f2fc44ec1d44192c5f39bf4f68a52a1c6a248dbf 100644 (file)
@@ -454,6 +454,7 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
+       free_page_mlock(page);
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
@@ -616,7 +617,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 
        page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
                        1 << PG_referenced | 1 << PG_arch_1 |
-                       1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
+                       1 << PG_owner_priv_1 | 1 << PG_mappedtodisk
+#ifdef CONFIG_UNEVICTABLE_LRU
+                       | 1 << PG_mlocked
+#endif
+                       );
        set_page_private(page, 0);
        set_page_refcounted(page);
 
@@ -1864,13 +1869,21 @@ void show_free_areas(void)
                }
        }
 
-       printk("Active_anon:%lu active_file:%lu inactive_anon%lu\n"
-               " inactive_file:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+       printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
+               " inactive_file:%lu"
+//TODO:  check/adjust line lengths
+#ifdef CONFIG_UNEVICTABLE_LRU
+               " unevictable:%lu"
+#endif
+               " dirty:%lu writeback:%lu unstable:%lu\n"
                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
                global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
                global_page_state(NR_INACTIVE_FILE),
+#ifdef CONFIG_UNEVICTABLE_LRU
+               global_page_state(NR_UNEVICTABLE),
+#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
@@ -1897,6 +1910,9 @@ void show_free_areas(void)
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
+#ifdef CONFIG_UNEVICTABLE_LRU
+                       " unevictable:%lukB"
+#endif
                        " present:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
@@ -1910,6 +1926,9 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
+#ifdef CONFIG_UNEVICTABLE_LRU
+                       K(zone_page_state(zone, NR_UNEVICTABLE)),
+#endif
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
@@ -3438,8 +3457,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
                if (realsize >= memmap_pages) {
                        realsize -= memmap_pages;
-                       mminit_dprintk(MMINIT_TRACE, "memmap_init",
-                               "%s zone: %lu pages used for memmap\n",
+                       printk(KERN_DEBUG
+                               "  %s zone: %lu pages used for memmap\n",
                                zone_names[j], memmap_pages);
                } else
                        printk(KERN_WARNING
@@ -3449,8 +3468,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                /* Account for reserved pages */
                if (j == 0 && realsize > dma_reserve) {
                        realsize -= dma_reserve;
-                       mminit_dprintk(MMINIT_TRACE, "memmap_init",
-                                       "%s zone: %lu pages reserved\n",
+                       printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
                                        zone_names[0], dma_reserve);
                }
 
@@ -4224,7 +4242,7 @@ void setup_per_zone_pages_min(void)
        for_each_zone(zone) {
                u64 tmp;
 
-               spin_lock_irqsave(&zone->lru_lock, flags);
+               spin_lock_irqsave(&zone->lock, flags);
                tmp = (u64)pages_min * zone->present_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
@@ -4256,7 +4274,7 @@ void setup_per_zone_pages_min(void)
                zone->pages_low   = zone->pages_min + (tmp >> 2);
                zone->pages_high  = zone->pages_min + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(&zone->lock, flags);
        }
 
        /* update totalreserve_pages */