vmscan: Use an indexed array for LRU variables
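
This converts the per-zone active/inactive page lists and their
nr_scan counters into a single indexed array, so the reclaim code can
iterate over all LRU lists instead of open-coding each pair. Only the
mm/vmscan.c side is shown here; the companion include/linux/mmzone.h
change presumably introduces declarations along these lines (a sketch
reconstructed from the identifiers used below; the exact layout is an
assumption):

	/* Sketch, not the verbatim header change. */
	enum lru_list {
		LRU_INACTIVE,	/* so NR_LRU_BASE + LRU_INACTIVE == NR_INACTIVE */
		LRU_ACTIVE,	/* and NR_LRU_BASE + LRU_ACTIVE == NR_ACTIVE */
		NR_LRU_LISTS
	};

	#define NR_LRU_BASE	NR_INACTIVE	/* first LRU vmstat item */
	#define for_each_lru(l)	for (l = 0; l < NR_LRU_LISTS; l++)

	/* inside struct zone: */
	struct {
		struct list_head list;	/* was active_list / inactive_list */
		unsigned long nr_scan;	/* was nr_scan_active / _inactive */
	} lru[NR_LRU_LISTS];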
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 75be453628bf6aed4fbcd6a3fdaa59f62603166e..46fdaa546b8d0493acb74b2b428693d46e0b14b3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -496,7 +496,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                page = lru_to_page(page_list);
                list_del(&page->lru);
 
-               if (TestSetPageLocked(page))
+               if (!trylock_page(page))
                        goto keep;
 
                VM_BUG_ON(PageActive(page));
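
The TestSetPageLocked() conversions in this file track a concurrent
page-lock API cleanup: trylock_page() returns nonzero when the lock
was acquired, so callers test !trylock_page() instead of the inverted
TestSetPageLocked(). A minimal sketch of that helper, assuming it
wraps the usual atomic test-and-set on PG_locked:

	static inline int trylock_page(struct page *page)
	{
		/* nonzero means we now hold PG_locked */
		return !test_and_set_bit(PG_locked, &page->flags);
	}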
@@ -582,7 +582,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
                                 */
-                               if (TestSetPageLocked(page))
+                               if (!trylock_page(page))
                                        goto keep;
                                if (PageDirty(page) || PageWriteback(page))
                                        goto keep_locked;
@@ -819,10 +819,10 @@ static unsigned long isolate_pages_global(unsigned long nr,
                                        int active)
 {
        if (active)
-               return isolate_lru_pages(nr, &z->active_list, dst,
+               return isolate_lru_pages(nr, &z->lru[LRU_ACTIVE].list, dst,
                                                scanned, order, mode);
        else
-               return isolate_lru_pages(nr, &z->inactive_list, dst,
+               return isolate_lru_pages(nr, &z->lru[LRU_INACTIVE].list, dst,
                                                scanned, order, mode);
 }
 
@@ -844,6 +844,51 @@ static unsigned long clear_active_flags(struct list_head *page_list)
        return nr_active;
 }
 
+/**
+ * isolate_lru_page - tries to isolate a page from its LRU list
+ * @page: page to isolate from its LRU list
+ *
+ * Isolates a @page from an LRU list, clears PageLRU and adjusts the
+ * vmstat statistic corresponding to whatever LRU list the page was on.
+ *
+ * Returns 0 if the page was removed from an LRU list.
+ * Returns -EBUSY if the page was not on an LRU list.
+ *
+ * The returned page will have PageLRU() cleared.  If it was found on
+ * the active list, it will have PageActive set.  That flag may need
+ * to be cleared by the caller before letting the page go.
+ *
+ * The vmstat statistic corresponding to the list on which the page was
+ * found will be decremented.
+ *
+ * Restrictions:
+ * (1) Must be called with an elevated refcount on the page. This is a
+ *     fundamental difference from isolate_lru_pages (which is called
+ *     without a stable reference).
+ * (2) the lru_lock must not be held.
+ * (3) interrupts must be enabled.
+ */
+int isolate_lru_page(struct page *page)
+{
+       int ret = -EBUSY;
+
+       if (PageLRU(page)) {
+               struct zone *zone = page_zone(page);
+
+               spin_lock_irq(&zone->lru_lock);
+               if (PageLRU(page) && get_page_unless_zero(page)) {
+                       ret = 0;
+                       ClearPageLRU(page);
+                       if (PageActive(page))
+                               del_page_from_active_list(zone, page);
+                       else
+                               del_page_from_inactive_list(zone, page);
+               }
+               spin_unlock_irq(&zone->lru_lock);
+       }
+       return ret;
+}
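
A hypothetical caller sketch for the contract documented above (the
pagelist and surrounding code are illustrative, not part of this
patch):

	LIST_HEAD(pagelist);

	/*
	 * Caller already holds a reference to @page, per restriction (1);
	 * zone->lru_lock is not held and interrupts are enabled.
	 */
	if (!isolate_lru_page(page)) {
		/*
		 * Success: PageLRU is clear and the vmstat counter for
		 * the source list has been decremented.  PageActive may
		 * still be set and must be cleared before the page is
		 * released.
		 */
		list_add_tail(&page->lru, &pagelist);
	}
	/* On -EBUSY the page was not on any LRU list; nothing to undo. */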
+
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
@@ -928,10 +973,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
-                       if (PageActive(page))
-                               add_page_to_active_list(zone, page);
-                       else
-                               add_page_to_inactive_list(zone, page);
+                       add_page_to_lru_list(zone, page, page_lru(page));
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_release(&pvec);
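
The open-coded active/inactive putback above collapses into
add_page_to_lru_list() keyed by page_lru(). Reconstructed from its use
here, page_lru() presumably derives the list index from the page
flags, roughly:

	static inline enum lru_list page_lru(struct page *page)
	{
		/* PageActive selects the active list, else inactive */
		return PageActive(page) ? LRU_ACTIVE : LRU_INACTIVE;
	}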
@@ -1099,8 +1141,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        int pgdeactivate = 0;
        unsigned long pgscanned;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
-       LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
-       LIST_HEAD(l_active);    /* Pages to go onto the active_list */
+       LIST_HEAD(l_active);
+       LIST_HEAD(l_inactive);
        struct page *page;
        struct pagevec pvec;
        int reclaim_mapped = 0;
@@ -1149,7 +1191,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
-               list_move(&page->lru, &zone->inactive_list);
+               list_move(&page->lru, &zone->lru[LRU_INACTIVE].list);
                mem_cgroup_move_lists(page, false);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -1179,7 +1221,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                SetPageLRU(page);
                VM_BUG_ON(!PageActive(page));
 
-               list_move(&page->lru, &zone->active_list);
+               list_move(&page->lru, &zone->lru[LRU_ACTIVE].list);
                mem_cgroup_move_lists(page, true);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -1199,65 +1241,64 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        pagevec_release(&pvec);
 }
 
+static unsigned long shrink_list(enum lru_list l, unsigned long nr_to_scan,
+       struct zone *zone, struct scan_control *sc, int priority)
+{
+       if (l == LRU_ACTIVE) {
+               shrink_active_list(nr_to_scan, zone, sc, priority);
+               return 0;
+       }
+       return shrink_inactive_list(nr_to_scan, zone, sc);
+}
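
shrink_list() puts both shrinkers behind one list-indexed entry point.
Note the asymmetry it preserves: shrink_active_list() only deactivates
pages, moving them toward the inactive list, and frees nothing itself,
so the active branch contributes 0 to the reclaim count; only
shrink_inactive_list() returns pages actually reclaimed.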
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static unsigned long shrink_zone(int priority, struct zone *zone,
                                struct scan_control *sc)
 {
-       unsigned long nr_active;
-       unsigned long nr_inactive;
+       unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;
+       enum lru_list l;
 
        if (scan_global_lru(sc)) {
                /*
                 * Add one to nr_to_scan just to make sure that the kernel
                 * will slowly sift through the active list.
                 */
-               zone->nr_scan_active +=
-                       (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
-               nr_active = zone->nr_scan_active;
-               zone->nr_scan_inactive +=
-                       (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
-               nr_inactive = zone->nr_scan_inactive;
-               if (nr_inactive >= sc->swap_cluster_max)
-                       zone->nr_scan_inactive = 0;
-               else
-                       nr_inactive = 0;
-
-               if (nr_active >= sc->swap_cluster_max)
-                       zone->nr_scan_active = 0;
-               else
-                       nr_active = 0;
+               for_each_lru(l) {
+                       zone->lru[l].nr_scan += (zone_page_state(zone,
+                                       NR_LRU_BASE + l) >> priority) + 1;
+                       nr[l] = zone->lru[l].nr_scan;
+                       if (nr[l] >= sc->swap_cluster_max)
+                               zone->lru[l].nr_scan = 0;
+                       else
+                               nr[l] = 0;
+               }
        } else {
                /*
                 * This reclaim occurs not because zone memory shortage but
                 * because memory controller hits its limit.
                 * Then, don't modify zone reclaim related data.
                 */
-               nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
-                                       zone, priority);
+               nr[LRU_ACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+                                       zone, priority, LRU_ACTIVE);
 
-               nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
-                                       zone, priority);
+               nr[LRU_INACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+                                       zone, priority, LRU_INACTIVE);
        }
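
The memcg path follows the same pattern: the paired
mem_cgroup_calc_reclaim_active()/_inactive() helpers collapse into a
single mem_cgroup_calc_reclaim() taking the list index. Judging only
from the call sites above, its signature would be something like this
(assumed; the memcontrol side is not part of this blobdiff):

	unsigned long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
						struct zone *zone,
						int priority,
						enum lru_list lru);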
 
-
-       while (nr_active || nr_inactive) {
-               if (nr_active) {
-                       nr_to_scan = min(nr_active,
+       while (nr[LRU_ACTIVE] || nr[LRU_INACTIVE]) {
+               for_each_lru(l) {
+                       if (nr[l]) {
+                               nr_to_scan = min(nr[l],
                                        (unsigned long)sc->swap_cluster_max);
-                       nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc, priority);
-               }
+                               nr[l] -= nr_to_scan;
 
-               if (nr_inactive) {
-                       nr_to_scan = min(nr_inactive,
-                                       (unsigned long)sc->swap_cluster_max);
-                       nr_inactive -= nr_to_scan;
-                       nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
-                                                               sc);
+                               nr_reclaimed += shrink_list(l, nr_to_scan,
+                                                       zone, sc, priority);
+                       }
                }
        }
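
The deferred-scan bookkeeping is unchanged, just indexed: each call
adds (zone_page_state(zone, NR_LRU_BASE + l) >> priority) + 1 to
lru[l].nr_scan, and a list is only scanned once its accumulated count
reaches sc->swap_cluster_max. As a worked example, at priority 12 with
4096 pages on a list, each call adds (4096 >> 12) + 1 = 2, so with the
usual SWAP_CLUSTER_MAX of 32 it takes 16 calls before that list is
scanned at all. The while loop then round-robins over the lists,
taking at most swap_cluster_max pages from each per iteration.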
 
@@ -1774,6 +1815,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 {
        struct zone *zone;
        unsigned long nr_to_scan, ret = 0;
+       enum lru_list l;
 
        for_each_zone(zone) {
 
@@ -1783,28 +1825,25 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
-               /* For pass = 0 we don't shrink the active list */
-               if (pass > 0) {
-                       zone->nr_scan_active +=
-                               (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
-                       if (zone->nr_scan_active >= nr_pages || pass > 3) {
-                               zone->nr_scan_active = 0;
+               for_each_lru(l) {
+                       /* For pass = 0 we don't shrink the active list */
+                       if (pass == 0 && l == LRU_ACTIVE)
+                               continue;
+
+                       zone->lru[l].nr_scan +=
+                               (zone_page_state(zone, NR_LRU_BASE + l)
+                                                               >> prio) + 1;
+                       if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                               zone->lru[l].nr_scan = 0;
                                nr_to_scan = min(nr_pages,
-                                       zone_page_state(zone, NR_ACTIVE));
-                               shrink_active_list(nr_to_scan, zone, sc, prio);
+                                       zone_page_state(zone,
+                                                       NR_LRU_BASE + l));
+                               ret += shrink_list(l, nr_to_scan, zone,
+                                                               sc, prio);
+                               if (ret >= nr_pages)
+                                       return ret;
                        }
                }
-
-               zone->nr_scan_inactive +=
-                       (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
-               if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
-                       zone->nr_scan_inactive = 0;
-                       nr_to_scan = min(nr_pages,
-                               zone_page_state(zone, NR_INACTIVE));
-                       ret += shrink_inactive_list(nr_to_scan, zone, sc);
-                       if (ret >= nr_pages)
-                               return ret;
-               }
        }
 
        return ret;