vmscan: fix get_scan_ratio() comment

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 154b9b608da6e9b304f41bd32238bbaa9a633a75..7ea1440b53db23350112cb1a46c592542807e4ac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -39,6 +39,7 @@
 #include <linux/freezer.h>
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
+#include <linux/sysctl.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -484,6 +485,7 @@ void putback_lru_page(struct page *page)
 {
        int lru;
        int active = !!TestClearPageActive(page);
+       int was_unevictable = PageUnevictable(page);
 
        VM_BUG_ON(PageLRU(page));
 
@@ -525,6 +527,11 @@ redo:
                 */
        }
 
+       if (was_unevictable && lru != LRU_UNEVICTABLE)
+               count_vm_event(UNEVICTABLE_PGRESCUED);
+       else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+               count_vm_event(UNEVICTABLE_PGCULLED);
+
        put_page(page);         /* drop ref from isolate */
 }
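
The two counters added above feed /proc/vmstat through the companion statistics patch. A minimal sketch of the matching mm/vmstan.c strings is below, assuming the series' CONFIG_UNEVICTABLE_LRU symbol and the usual vmstat_text naming convention (names illustrative, not part of this hunk):

	#ifdef CONFIG_UNEVICTABLE_LRU
		"unevictable_pgs_culled",	/* UNEVICTABLE_PGCULLED */
		"unevictable_pgs_rescued",	/* UNEVICTABLE_PGRESCUED */
	#endif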
 
@@ -576,11 +583,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                sc->nr_scanned++;
 
-               if (unlikely(!page_evictable(page, NULL))) {
-                       unlock_page(page);
-                       putback_lru_page(page);
-                       continue;
-               }
+               if (unlikely(!page_evictable(page, NULL)))
+                       goto cull_mlocked;
 
                if (!sc->may_swap && page_mapped(page))
                        goto keep_locked;
@@ -618,9 +622,22 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 */
-               if (PageAnon(page) && !PageSwapCache(page))
+               if (PageAnon(page) && !PageSwapCache(page)) {
+                       if (!(sc->gfp_mask & __GFP_IO))
+                               goto keep_locked;
+                       switch (try_to_munlock(page)) {
+                       case SWAP_FAIL:         /* shouldn't happen */
+                       case SWAP_AGAIN:
+                               goto keep_locked;
+                       case SWAP_MLOCK:
+                               goto cull_mlocked;
+                       case SWAP_SUCCESS:
+                               ; /* fall thru'; add to swap cache */
+                       }
                        if (!add_to_swap(page, GFP_ATOMIC))
                                goto activate_locked;
+                       may_enter_fs = 1;
+               }
 #endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
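
Both switch statements in this function key off the rmap return codes, which this series extends with SWAP_MLOCK. For orientation, a sketch of the values as they are assumed to look in include/linux/rmap.h (only SWAP_MLOCK is new; the numbering follows the existing convention):

	#define SWAP_SUCCESS	0	/* all mappings were unmapped */
	#define SWAP_AGAIN	1	/* transient failure, try again later */
	#define SWAP_FAIL	2	/* permanent failure */
	#define SWAP_MLOCK	3	/* page is mlocked: cull it instead */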
@@ -635,6 +652,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto activate_locked;
                        case SWAP_AGAIN:
                                goto keep_locked;
+                       case SWAP_MLOCK:
+                               goto cull_mlocked;
                        case SWAP_SUCCESS:
                                ; /* try to free the page below */
                        }
@@ -716,7 +735,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (!mapping || !__remove_mapping(mapping, page))
                        goto keep_locked;
 
-               unlock_page(page);
+               /*
+                * At this point, we have no other references and there is
+                * no way to pick any more up (removed from LRU, removed
+                * from pagecache). Can use non-atomic bitops now (and
+                * we obviously don't have to worry about waking up a process
+                * waiting on the page lock, because there are no references).
+                */
+               __clear_page_locked(page);
 free_it:
                nr_reclaimed++;
                if (!pagevec_add(&freed_pvec, page)) {
@@ -725,6 +751,11 @@ free_it:
                }
                continue;
 
+cull_mlocked:
+               unlock_page(page);
+               putback_lru_page(page);
+               continue;
+
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
@@ -736,7 +767,7 @@ keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
@@ -1358,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        file_prio = 200 - sc->swappiness;
 
        /*
-        *                  anon       recent_rotated[0]
-        * %anon = 100 * ----------- / ----------------- * IO cost
-        *               anon + file      rotate_sum
+        * The amount of pressure on anon vs file pages is inversely
+        * proportional to the fraction of recently scanned pages on
+        * each list that were recently referenced and in active use.
         */
        ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
        ap /= zone->recent_rotated[0] + 1;
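
Since the old comment's formula was replaced by prose, a worked example of the calculation may help (hypothetical numbers; assumes anon_prio = sc->swappiness and the symmetric fp calculation that follows in this function):

	/*
	 * swappiness = 60  =>  anon_prio = 60, file_prio = 140
	 * recent_scanned[0] = 1000, recent_rotated[0] = 500  (anon re-referenced a lot)
	 * recent_scanned[1] = 1000, recent_rotated[1] = 100  (file mostly streamed once)
	 *
	 *   ap = (60 + 1)  * (1000 + 1) / (500 + 1)  ~=  121
	 *   fp = (140 + 1) * (1000 + 1) / (100 + 1)  ~= 1397
	 *
	 * so roughly 8% of the scan pressure goes to the anon lists and 92%
	 * to the file lists: the list whose pages keep getting re-referenced
	 * is scanned less.
	 */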
@@ -1392,16 +1423,13 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                if (scan_global_lru(sc)) {
                        int file = is_file_lru(l);
                        int scan;
-                       /*
-                        * Add one to nr_to_scan just to make sure that the
-                        * kernel will slowly sift through each list.
-                        */
+
                        scan = zone_page_state(zone, NR_LRU_BASE + l);
                        if (priority) {
                                scan >>= priority;
                                scan = (scan * percent[file]) / 100;
                        }
-                       zone->lru[l].nr_scan += scan + 1;
+                       zone->lru[l].nr_scan += scan;
                        nr[l] = zone->lru[l].nr_scan;
                        if (nr[l] >= sc->swap_cluster_max)
                                zone->lru[l].nr_scan = 0;
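
To see what dropping the "+ 1" changes, a short illustration with hypothetical numbers:

	/*
	 * inactive-file list of 262144 pages, priority 12, percent[file] = 92:
	 *
	 *   scan = 262144 >> 12    = 64
	 *   scan = 64 * 92 / 100   = 58 pages added to nr_scan this pass
	 *
	 * A list whose computed scan works out to zero now accumulates
	 * nothing, instead of creeping up by one page on every pass.
	 */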
@@ -2323,16 +2351,242 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  * @vma: the VMA in which the page is or will be mapped, may be NULL
  *
  * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.
+ * lists vs unevictable list.  The vma argument is !NULL when called from the
+ * fault path to determine how to instantiate a new page.
  *
  * Reasons page might not be evictable:
- * TODO - later patches
+ * (1) page's mapping marked unevictable
+ * (2) page is part of an mlocked VMA
+ *
  */
 int page_evictable(struct page *page, struct vm_area_struct *vma)
 {
 
-       /* TODO:  test page [!]evictable conditions */
+       if (mapping_unevictable(page_mapping(page)))
+               return 0;
+
+       if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+               return 0;
 
        return 1;
 }
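
page_evictable() relies on two helpers introduced elsewhere in this series. A simplified sketch of what they test (the real definitions live in include/linux/pagemap.h and mm/internal.h and carry more detail, e.g. the NR_MLOCK accounting):

	static inline int mapping_unevictable(struct address_space *mapping)
	{
		if (likely(mapping))
			return test_bit(AS_UNEVICTABLE, &mapping->flags);
		return !!mapping;
	}

	/* fault path only: is the page being mapped into a VM_LOCKED vma? */
	static inline int is_mlocked_vma(struct vm_area_struct *vma,
					 struct page *page)
	{
		if (likely(!(vma->vm_flags & VM_LOCKED)))
			return 0;
		SetPageMlocked(page);		/* cull straight to unevictable */
		return 1;
	}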
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+       VM_BUG_ON(PageActive(page));
+
+retry:
+       ClearPageUnevictable(page);
+       if (page_evictable(page, NULL)) {
+               enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+
+               __dec_zone_state(zone, NR_UNEVICTABLE);
+               list_move(&page->lru, &zone->lru[l].list);
+               __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+               __count_vm_event(UNEVICTABLE_PGRESCUED);
+       } else {
+               /*
+                * rotate unevictable list
+                */
+               SetPageUnevictable(page);
+               list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+               if (page_evictable(page, NULL))
+                       goto retry;
+       }
+}
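
The goto retry after rotating the page covers a narrow race; an illustrative timeline (a reading of the code, not upstream text):

	/*
	 *   this CPU                           elsewhere
	 *   --------------------------------   -----------------------------
	 *   ClearPageUnevictable(page)
	 *   page_evictable() -> still false
	 *                                       munlock()/SHM_UNLOCK makes
	 *                                       the page evictable
	 *   SetPageUnevictable(page)
	 *   rotate onto unevictable list
	 *   page_evictable() -> now true
	 *   goto retry  -> page is rescued now rather than sitting on the
	 *                  unevictable list until the next explicit rescan
	 */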
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+       pgoff_t next = 0;
+       pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+                        PAGE_CACHE_SHIFT;
+       struct zone *zone;
+       struct pagevec pvec;
+
+       if (mapping->nrpages == 0)
+               return;
+
+       pagevec_init(&pvec, 0);
+       while (next < end &&
+               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               int i;
+               int pg_scanned = 0;
+
+               zone = NULL;
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       pgoff_t page_index = page->index;
+                       struct zone *pagezone = page_zone(page);
+
+                       pg_scanned++;
+                       if (page_index > next)
+                               next = page_index;
+                       next++;
+
+                       if (pagezone != zone) {
+                               if (zone)
+                                       spin_unlock_irq(&zone->lru_lock);
+                               zone = pagezone;
+                               spin_lock_irq(&zone->lru_lock);
+                       }
+
+                       if (PageLRU(page) && PageUnevictable(page))
+                               check_move_unevictable_page(page, zone);
+               }
+               if (zone)
+                       spin_unlock_irq(&zone->lru_lock);
+               pagevec_release(&pvec);
+
+               count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+       }
+
+}
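
The natural caller of scan_mapping_unevictable_pages() is the SHM_UNLOCK path, where pages culled while the segment was locked must be returned to the normal LRUs. A usage sketch, assuming a shmem_lock()-style unlock path (abridged; not the exact upstream call site):

	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}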
+
+/**
+ * scan_zone_unevictable_pages - check unevictable list for evictable pages
+ * @zone: zone whose unevictable list is to be scanned
+ *
+ * Scan @zone's unevictable LRU lists to check for pages that have become
+ * evictable.  Move those that have to @zone's inactive list where they
+ * become candidates for reclaim, unless page reclaim decides
+ * to reactivate them.  Pages that are still unevictable are rotated
+ * back onto @zone's unevictable list.
+ */
+#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
+void scan_zone_unevictable_pages(struct zone *zone)
+{
+       struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
+       unsigned long scan;
+       unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
+
+       while (nr_to_scan > 0) {
+               unsigned long batch_size = min(nr_to_scan,
+                                               SCAN_UNEVICTABLE_BATCH_SIZE);
+
+               spin_lock_irq(&zone->lru_lock);
+               for (scan = 0;  scan < batch_size; scan++) {
+                       struct page *page = lru_to_page(l_unevictable);
+
+                       if (!trylock_page(page))
+                               continue;
+
+                       prefetchw_prev_lru_page(page, l_unevictable, flags);
+
+                       if (likely(PageLRU(page) && PageUnevictable(page)))
+                               check_move_unevictable_page(page, zone);
+
+                       unlock_page(page);
+               }
+               spin_unlock_irq(&zone->lru_lock);
+
+               nr_to_scan -= batch_size;
+       }
+}
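
The batch size mainly bounds how long zone->lru_lock is held; a quick illustration with hypothetical numbers:

	/*
	 * NR_UNEVICTABLE = 1,000,000 pages, batch = 16:
	 * lru_lock is taken and released 62,500 times, each time covering
	 * at most 16 pages, instead of being held across the whole scan.
	 */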
+
+
+/**
+ * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
+ *
+ * A really big hammer:  scan all zones' unevictable LRU lists to check for
+ * pages that have become evictable.  Move those back to the zones'
+ * inactive list where they become candidates for reclaim.
+ * This occurs when, e.g., we have unswappable pages on the unevictable lists,
+ * and we add swap to the system.  As such, it runs in the context of a task
+ * that has possibly/probably made some previously unevictable pages
+ * evictable.
+ */
+void scan_all_zones_unevictable_pages(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone) {
+               scan_zone_unevictable_pages(zone);
+       }
+}
+
+/*
+ * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
+ * all nodes' unevictable lists for evictable pages
+ */
+unsigned long scan_unevictable_pages;
+
+int scan_unevictable_handler(struct ctl_table *table, int write,
+                          struct file *file, void __user *buffer,
+                          size_t *length, loff_t *ppos)
+{
+       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+
+       if (write && *(unsigned long *)table->data)
+               scan_all_zones_unevictable_pages();
+
+       scan_unevictable_pages = 0;
+       return 0;
+}
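
For this handler to be reachable, kernel/sysctl.c needs a matching entry in the vm table. A sketch of what that entry would look like for this kernel generation (assumes the companion sysctl patch; field values are illustrative):

	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "scan_unevictable_pages",
		.data		= &scan_unevictable_pages,
		.maxlen		= sizeof(scan_unevictable_pages),
		.mode		= 0644,
		.proc_handler	= &scan_unevictable_handler,
	},

Writing any non-zero value, for example "echo 1 > /proc/sys/vm/scan_unevictable_pages", then triggers a one-off rescan of every zone's unevictable list.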
+
+/*
+ * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
+ * a specified node's per zone unevictable lists for evictable pages.
+ */
+
+static ssize_t read_scan_unevictable_node(struct sys_device *dev,
+                                         struct sysdev_attribute *attr,
+                                         char *buf)
+{
+       return sprintf(buf, "0\n");     /* always zero; should fit... */
+}
+
+static ssize_t write_scan_unevictable_node(struct sys_device *dev,
+                                          struct sysdev_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
+       struct zone *zone;
+       unsigned long req;
+
+       /* a parse error or an explicit zero is a no-op */
+       if (strict_strtoul(buf, 10, &req) || !req)
+               return 1;
+
+       for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+               if (!populated_zone(zone))
+                       continue;
+               scan_zone_unevictable_pages(zone);
+       }
+       return 1;
+}
+
+
+static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+                       read_scan_unevictable_node,
+                       write_scan_unevictable_node);
+
+int scan_unevictable_register_node(struct node *node)
+{
+       return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
+void scan_unevictable_unregister_node(struct node *node)
+{
+       sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
 #endif
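
The per-node attribute only appears once register_node()/unregister_node() in drivers/base/node.c call the two helpers above; a sketch of that wiring and the resulting sysfs usage (both the call site and the path follow the sysdev layout of this era and should be read as assumptions):

	/* drivers/base/node.c, register_node() -- sketch */
	if (!error)
		error = scan_unevictable_register_node(node);

	/*
	 * userspace, to rescan node 0's unevictable lists:
	 *   echo 1 > /sys/devices/system/node/node0/scan_unevictable_pages
	 */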