vmscan: Use an indexed array for LRU variables
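The zone's separate active/inactive lists and their nr_scan counters become one
indexed array, so reclaim code can iterate over the LRU lists instead of
duplicating near-identical logic per list. The array and its iterator come from
a companion include/linux/mmzone.h change that is not part of this blobdiff;
judging from how the code below uses them, the definitions look roughly like
this (a sketch, not the authoritative header):

	/* Assumed companion definitions from include/linux/mmzone.h. */
	enum lru_list {
		LRU_BASE,
		LRU_INACTIVE = LRU_BASE,	/* matches NR_INACTIVE */
		LRU_ACTIVE,			/* matches NR_ACTIVE */
		NR_LRU_LISTS
	};

	#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

	struct zone {
		/* ... */
		struct {
			struct list_head list;
			unsigned long nr_scan;
		} lru[NR_LRU_LISTS];
		/* ... */
	};

The zone_page_state(zone, NR_LRU_BASE + l) expressions below only work if the
vmstat items NR_INACTIVE and NR_ACTIVE are laid out in the same order as
enum lru_list, with NR_LRU_BASE aliasing the first of them.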
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1fd4912a596c347f0eb699c5d25a085f80da6ef9..46fdaa546b8d0493acb74b2b428693d46e0b14b3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -819,10 +819,10 @@ static unsigned long isolate_pages_global(unsigned long nr,
                                        int active)
 {
        if (active)
-               return isolate_lru_pages(nr, &z->active_list, dst,
+               return isolate_lru_pages(nr, &z->lru[LRU_ACTIVE].list, dst,
                                                scanned, order, mode);
        else
-               return isolate_lru_pages(nr, &z->inactive_list, dst,
+               return isolate_lru_pages(nr, &z->lru[LRU_INACTIVE].list, dst,
                                                scanned, order, mode);
 }
 
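With the lists in an array, the active/inactive branch above survives only for
readability; the call could equally be a single indexed lookup. A sketch of the
equivalent form, not something this patch does:

	return isolate_lru_pages(nr, &z->lru[active ? LRU_ACTIVE : LRU_INACTIVE].list,
					dst, scanned, order, mode);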
@@ -973,10 +973,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
-                       if (PageActive(page))
-                               add_page_to_active_list(zone, page);
-                       else
-                               add_page_to_inactive_list(zone, page);
+                       add_page_to_lru_list(zone, page, page_lru(page));
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_release(&pvec);
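page_lru() and add_page_to_lru_list() are presumably added by the matching
include/linux/mm_inline.h change. Roughly, page_lru() derives the target list
from the page's Active flag, which is exactly what the removed if/else did by
hand, and the add helper pairs the list insertion with the vmstat bump:

	/* Sketch of the assumed mm_inline.h helpers. */
	static inline enum lru_list page_lru(struct page *page)
	{
		enum lru_list lru = LRU_BASE;

		if (PageActive(page))
			lru += LRU_ACTIVE;
		return lru;
	}

	static inline void
	add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
	{
		list_add(&page->lru, &zone->lru[l].list);
		__inc_zone_state(zone, NR_LRU_BASE + l);
	}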
@@ -1144,8 +1141,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        int pgdeactivate = 0;
        unsigned long pgscanned;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
-       LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
-       LIST_HEAD(l_active);    /* Pages to go onto the active_list */
+       LIST_HEAD(l_active);
+       LIST_HEAD(l_inactive);
        struct page *page;
        struct pagevec pvec;
        int reclaim_mapped = 0;
@@ -1194,7 +1191,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
-               list_move(&page->lru, &zone->inactive_list);
+               list_move(&page->lru, &zone->lru[LRU_INACTIVE].list);
                mem_cgroup_move_lists(page, false);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -1224,7 +1221,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                SetPageLRU(page);
                VM_BUG_ON(!PageActive(page));
 
-               list_move(&page->lru, &zone->active_list);
+               list_move(&page->lru, &zone->lru[LRU_ACTIVE].list);
                mem_cgroup_move_lists(page, true);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
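Note that the two putback paths in shrink_active_list() above keep open-coded
list_move() calls on the indexed lists rather than using add_page_to_lru_list(),
presumably because this function batches its counter updates in pgmoved instead
of bumping a vmstat counter per page.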
@@ -1244,65 +1241,64 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        pagevec_release(&pvec);
 }
 
+static unsigned long shrink_list(enum lru_list l, unsigned long nr_to_scan,
+       struct zone *zone, struct scan_control *sc, int priority)
+{
+       if (l == LRU_ACTIVE) {
+               shrink_active_list(nr_to_scan, zone, sc, priority);
+               return 0;
+       }
+       return shrink_inactive_list(nr_to_scan, zone, sc);
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static unsigned long shrink_zone(int priority, struct zone *zone,
                                struct scan_control *sc)
 {
-       unsigned long nr_active;
-       unsigned long nr_inactive;
+       unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;
+       enum lru_list l;
 
        if (scan_global_lru(sc)) {
                /*
                 * Add one to nr_to_scan just to make sure that the kernel
                 * will slowly sift through the active list.
                 */
-               zone->nr_scan_active +=
-                       (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
-               nr_active = zone->nr_scan_active;
-               zone->nr_scan_inactive +=
-                       (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
-               nr_inactive = zone->nr_scan_inactive;
-               if (nr_inactive >= sc->swap_cluster_max)
-                       zone->nr_scan_inactive = 0;
-               else
-                       nr_inactive = 0;
-
-               if (nr_active >= sc->swap_cluster_max)
-                       zone->nr_scan_active = 0;
-               else
-                       nr_active = 0;
+               for_each_lru(l) {
+                       zone->lru[l].nr_scan += (zone_page_state(zone,
+                                       NR_LRU_BASE + l) >> priority) + 1;
+                       nr[l] = zone->lru[l].nr_scan;
+                       if (nr[l] >= sc->swap_cluster_max)
+                               zone->lru[l].nr_scan = 0;
+                       else
+                               nr[l] = 0;
+               }
        } else {
                /*
                 * This reclaim occurs not because of a zone memory shortage
                 * but because the memory controller has hit its limit.
                 * In that case, don't modify the zone reclaim related data.
                 */
-               nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
-                                       zone, priority);
+               nr[LRU_ACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+                                       zone, priority, LRU_ACTIVE);
 
-               nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
-                                       zone, priority);
+               nr[LRU_INACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+                                       zone, priority, LRU_INACTIVE);
        }
 
-
-       while (nr_active || nr_inactive) {
-               if (nr_active) {
-                       nr_to_scan = min(nr_active,
+       while (nr[LRU_ACTIVE] || nr[LRU_INACTIVE]) {
+               for_each_lru(l) {
+                       if (nr[l]) {
+                               nr_to_scan = min(nr[l],
                                        (unsigned long)sc->swap_cluster_max);
-                       nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc, priority);
-               }
+                               nr[l] -= nr_to_scan;
 
-               if (nr_inactive) {
-                       nr_to_scan = min(nr_inactive,
-                                       (unsigned long)sc->swap_cluster_max);
-                       nr_inactive -= nr_to_scan;
-                       nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
-                                                               sc);
+                               nr_reclaimed += shrink_list(l, nr_to_scan,
+                                                       zone, sc, priority);
+                       }
                }
        }
 
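The scan batching itself is unchanged, merely indexed: each call adds
(zone_page_state(zone, NR_LRU_BASE + l) >> priority) + 1 to lru[l].nr_scan, and
a list is only scanned once that accumulator reaches sc->swap_cluster_max. With
illustrative numbers: 2^20 inactive pages at the default priority of 12
contribute (2^20 >> 12) + 1 = 257 pages per call, far above a typical
swap_cluster_max of 32, so the list is scanned immediately, while a list of
2^10 pages contributes just 1 per call and is scanned only every 32nd call.
Note also that shrink_list() returns 0 for the active list, because
shrink_active_list() demotes pages rather than freeing them, so nr_reclaimed
still counts inactive-list reclaim only.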
@@ -1819,6 +1815,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 {
        struct zone *zone;
        unsigned long nr_to_scan, ret = 0;
+       enum lru_list l;
 
        for_each_zone(zone) {
 
@@ -1828,28 +1825,25 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
-               /* For pass = 0 we don't shrink the active list */
-               if (pass > 0) {
-                       zone->nr_scan_active +=
-                               (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
-                       if (zone->nr_scan_active >= nr_pages || pass > 3) {
-                               zone->nr_scan_active = 0;
+               for_each_lru(l) {
+                       /* For pass = 0 we don't shrink the active list */
+                       if (pass == 0 && l == LRU_ACTIVE)
+                               continue;
+
+                       zone->lru[l].nr_scan +=
+                               (zone_page_state(zone, NR_LRU_BASE + l)
+                                                               >> prio) + 1;
+                       if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                               zone->lru[l].nr_scan = 0;
                                nr_to_scan = min(nr_pages,
-                                       zone_page_state(zone, NR_ACTIVE));
-                               shrink_active_list(nr_to_scan, zone, sc, prio);
+                                       zone_page_state(zone,
+                                                       NR_LRU_BASE + l));
+                               ret += shrink_list(l, nr_to_scan, zone,
+                                                               sc, prio);
+                               if (ret >= nr_pages)
+                                       return ret;
                        }
                }
-
-               zone->nr_scan_inactive +=
-                       (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
-               if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
-                       zone->nr_scan_inactive = 0;
-                       nr_to_scan = min(nr_pages,
-                               zone_page_state(zone, NR_INACTIVE));
-                       ret += shrink_inactive_list(nr_to_scan, zone, sc);
-                       if (ret >= nr_pages)
-                               return ret;
-               }
        }
 
        return ret;
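The restructured loop preserves the old behaviour: pass 0 still skips the
active list via the continue, and since shrink_list() returns 0 for the active
list, ret (and the early return once ret >= nr_pages) is still driven purely by
inactive-list reclaim. The only visible difference is that the early-return
check now sits inside the per-list loop, so it can also fire between the active
and inactive passes of a single zone.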