[linux-2.6-omap-h63xx.git] blobdiff: mm/page_alloc.c

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4125230a1b2c11cc024da80e543d09e252cc8766..d0a240fbb8bfc34f5304ee896af7964442230d4d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -44,7 +44,7 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
-#include <linux/memcontrol.h>
+#include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 
 #include <asm/tlbflush.h>
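The include swap in this first hunk tracks the memcg rework that moves per-page cgroup state out of struct page into a flat page_cgroup array allocated at boot. A rough sketch of what linux/page_cgroup.h describes in this era (reconstructed from memory, not copied from this tree):

	struct page_cgroup {
		unsigned long flags;
		struct mem_cgroup *mem_cgroup;	/* owning cgroup */
		struct page *page;		/* back-pointer to the page */
		struct list_head lru;		/* per-cgroup LRU list */
	};

The hunks below fall out of that move: nothing cgroup-related lives in struct page any more, so page_alloc.c stops poking at it.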
@@ -223,17 +223,12 @@ static inline int bad_range(struct zone *zone, struct page *page)
 
 static void bad_page(struct page *page)
 {
-       void *pc = page_get_page_cgroup(page);
-
        printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
                "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
                current->comm, page, (int)(2*sizeof(unsigned long)),
                (unsigned long)page->flags, page->mapping,
                page_mapcount(page), page_count(page));
-       if (pc) {
-               printk(KERN_EMERG "cgroup:%p\n", pc);
-               page_reset_bad_cgroup(page);
-       }
+
        printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
                KERN_EMERG "Backtrace:\n");
        dump_stack();
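With the cgroup pointer gone from struct page, bad_page() has nothing cgroup-specific left to print or reset. A diagnostic that still wanted the cgroup state would presumably go through the array lookup instead; a hedged one-liner, assuming the page_cgroup API of this era:

	struct page_cgroup *pc = lookup_page_cgroup(page);	/* NULL until the array is set up */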
@@ -454,9 +449,9 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
+       free_page_mlock(page);
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
                bad_page(page);
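free_page_mlock() moves to the head of free_pages_check() so that a page freed while still mlocked has its NR_MLOCK accounting and PG_mlocked bit cleaned up before the flags are sanity-checked. Roughly what the helper looked like in mm/internal.h around 2.6.28, sketched from memory:

	static inline void free_page_mlock(struct page *page)
	{
		if (unlikely(TestClearPageMlocked(page))) {
			unsigned long flags;

			local_irq_save(flags);
			__dec_zone_page_state(page, NR_MLOCK);
			__count_vm_event(UNEVICTABLE_MLOCKFREED);
			local_irq_restore(flags);
		}
	}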
@@ -602,7 +597,6 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
                bad_page(page);
@@ -616,7 +610,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 
        page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
                        1 << PG_referenced | 1 << PG_arch_1 |
-                       1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
+                       1 << PG_owner_priv_1 | 1 << PG_mappedtodisk
+#ifdef CONFIG_UNEVICTABLE_LRU
+                       | 1 << PG_mlocked
+#endif
+                       );
        set_page_private(page, 0);
        set_page_refcounted(page);
 
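The new #ifdef in the flag-clearing mask is needed because PG_mlocked only exists when the unevictable-LRU work is configured in; on other configs the identifier is simply not declared. In the page-flags.h of this era the enum reads roughly like this (a sketch from memory):

	#ifdef CONFIG_UNEVICTABLE_LRU
		PG_unevictable,		/* page is "unevictable" */
		PG_mlocked,		/* page is vma mlocked */
	#endif

so referencing PG_mlocked unconditionally would break !CONFIG_UNEVICTABLE_LRU builds.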
@@ -3433,6 +3431,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        pgdat->nr_zones = 0;
        init_waitqueue_head(&pgdat->kswapd_wait);
        pgdat->kswapd_max_order = 0;
+       pgdat_page_cgroup_init(pgdat);
        
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
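pgdat_page_cgroup_init() is the boot-time hook of the same page_cgroup rework: each node's metadata pointer is initialised as the node comes up, and the arrays themselves are populated later (page_cgroup_init() on the flat layout, section hooks under SPARSEMEM). For the non-SPARSEMEM case the hook is close to trivial; a hedged sketch of the mm/page_cgroup.c version:

	void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
	{
		pgdat->node_page_cgroup = NULL;	/* filled in by page_cgroup_init() */
	}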
@@ -3452,8 +3451,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
                if (realsize >= memmap_pages) {
                        realsize -= memmap_pages;
-                       mminit_dprintk(MMINIT_TRACE, "memmap_init",
-                               "%s zone: %lu pages used for memmap\n",
+                       printk(KERN_DEBUG
+                               "  %s zone: %lu pages used for memmap\n",
                                zone_names[j], memmap_pages);
                } else
                        printk(KERN_WARNING
@@ -3463,8 +3462,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                /* Account for reserved pages */
                if (j == 0 && realsize > dma_reserve) {
                        realsize -= dma_reserve;
-                       mminit_dprintk(MMINIT_TRACE, "memmap_init",
-                                       "%s zone: %lu pages reserved\n",
+                       printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
                                        zone_names[0], dma_reserve);
                }
 
@@ -4238,7 +4236,7 @@ void setup_per_zone_pages_min(void)
        for_each_zone(zone) {
                u64 tmp;
 
-               spin_lock_irqsave(&zone->lru_lock, flags);
+               spin_lock_irqsave(&zone->lock, flags);
                tmp = (u64)pages_min * zone->present_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
@@ -4270,7 +4268,7 @@ void setup_per_zone_pages_min(void)
                zone->pages_low   = zone->pages_min + (tmp >> 2);
                zone->pages_high  = zone->pages_min + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(&zone->lock, flags);
        }
 
        /* update totalreserve_pages */
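The final two hunks are a locking fix: the LRU lock has nothing to protect in setup_per_zone_pages_min(), while zone->lock is presumably what the function actually needs, since setup_zone_migrate_reserve() can end up moving pages between free lists. The arithmetic the lock guards gives each zone a size-proportional share of pages_min; with hypothetical numbers (not from the source) on the non-highmem path:

	tmp        = 1024 * 131072 / 262144 = 512	/* pages_min * present_pages / lowmem_pages */
	pages_min  = 512
	pages_low  = 512 + 512/4 = 640			/* 1.25 * pages_min */
	pages_high = 512 + 512/2 = 768			/* 1.50 * pages_min */

That is, pages_low and pages_high always sit 25% and 50% above the zone's own minimum watermark.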