#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
+#include <linux/sysctl.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
sc->nr_scanned++;
- if (unlikely(!page_evictable(page, NULL))) {
- unlock_page(page);
- putback_lru_page(page);
- continue;
- }
+ if (unlikely(!page_evictable(page, NULL)))
+ goto cull_mlocked;
if (!sc->may_swap && page_mapped(page))
goto keep_locked;
* Anonymous process memory has backing store?
* Try to allocate it some swap space here.
*/
- if (PageAnon(page) && !PageSwapCache(page))
+ if (PageAnon(page) && !PageSwapCache(page)) {
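+ /*
+ * Before spending swap space on this page, ask the rmap whether
+ * any VM_LOCKED vma still maps it; if so the page is mlocked and
+ * is culled rather than added to the swap cache.
+ */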
+ switch (try_to_munlock(page)) {
+ case SWAP_FAIL: /* shouldn't happen */
+ case SWAP_AGAIN:
+ goto keep_locked;
+ case SWAP_MLOCK:
+ goto cull_mlocked;
+ case SWAP_SUCCESS:
+ ; /* fall thru'; add to swap cache */
+ }
if (!add_to_swap(page, GFP_ATOMIC))
goto activate_locked;
+ }
#endif /* CONFIG_SWAP */
mapping = page_mapping(page);
goto activate_locked;
case SWAP_AGAIN:
goto keep_locked;
+ case SWAP_MLOCK:
+ goto cull_mlocked;
case SWAP_SUCCESS:
; /* try to free the page below */
}
if (!mapping || !__remove_mapping(mapping, page))
goto keep_locked;
- unlock_page(page);
+ /*
+ * At this point, we have no other references and there is
+ * no way to pick any more up (removed from LRU, removed
+ * from pagecache). Can use non-atomic bitops now (and
+ * we obviously don't have to worry about waking up a process
+ * waiting on the page lock, because there are no references).
+ */
+ __clear_page_locked(page);
free_it:
nr_reclaimed++;
if (!pagevec_add(&freed_pvec, page)) {
}
continue;
+cull_mlocked:
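+ /*
+ * Unevictable page: hand it back to putback_lru_page(), which
+ * will park it on the zone's unevictable list.
+ */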
+ unlock_page(page);
+ putback_lru_page(page);
+ continue;
+
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (PageSwapCache(page) && vm_swap_full())
unlock_page(page);
keep:
list_add(&page->lru, &ret_pages);
- VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
}
list_splice(&ret_pages, page_list);
if (pagevec_count(&freed_pvec))
if (scan_global_lru(sc)) {
int file = is_file_lru(l);
int scan;
- /*
- * Add one to nr_to_scan just to make sure that the
- * kernel will slowly sift through each list.
- */
+
scan = zone_page_state(zone, NR_LRU_BASE + l);
if (priority) {
scan >>= priority;
scan = (scan * percent[file]) / 100;
}
- zone->lru[l].nr_scan += scan + 1;
+ zone->lru[l].nr_scan += scan;
nr[l] = zone->lru[l].nr_scan;
if (nr[l] >= sc->swap_cluster_max)
zone->lru[l].nr_scan = 0;
* @vma: the VMA in which the page is or will be mapped, may be NULL
*
* Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.
+ * lists vs unevictable list. The vma argument is !NULL when called from the
+ * fault path to determine how to instantiate a new page.
*
* Reasons page might not be evictable:
* (1) page's mapping marked unevictable
+ * (2) page is part of an mlocked VMA
*
- * TODO - later patches
*/
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
if (mapping_unevictable(page_mapping(page)))
return 0;
- /* TODO: test page [!]evictable conditions */
+ if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+ return 0;
+
+ return 1;
+}
+
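+/*
+ * Debugging aid: report where a page being rescued from the unevictable
+ * list came from: the file path and offset for page cache pages, or the
+ * owning task's comm for anonymous pages.
+ */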
+static void show_page_path(struct page *page)
+{
+ char buf[256];
+ if (page_is_file_cache(page)) {
+ struct address_space *mapping = page->mapping;
+ struct dentry *dentry;
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ spin_lock(&mapping->i_mmap_lock);
+ dentry = d_find_alias(mapping->host);
+ if (dentry) {
+ printk(KERN_INFO "rescued: %s %lu\n",
+ dentry_path(dentry, buf, 256), pgoff);
+ dput(dentry); /* d_find_alias() takes a reference */
+ }
+ spin_unlock(&mapping->i_mmap_lock);
+ } else {
+#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+
+ anon_vma = page_lock_anon_vma(page);
+ if (!anon_vma)
+ return;
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ /* mm->owner may be NULL if the owning task has exited */
+ if (vma->vm_mm->owner)
+ printk(KERN_INFO "rescued: anon %s\n",
+ vma->vm_mm->owner->comm);
+ break;
+ }
+ page_unlock_anon_vma(anon_vma);
+#endif
+ }
+}
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+ VM_BUG_ON(PageActive(page));
+
+retry:
+ ClearPageUnevictable(page);
+ if (page_evictable(page, NULL)) {
+ enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+
+ show_page_path(page);
+
+ __dec_zone_state(zone, NR_UNEVICTABLE);
+ list_move(&page->lru, &zone->lru[l].list);
+ __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+ __count_vm_event(UNEVICTABLE_PGRESCUED);
+ } else {
+ /*
+ * rotate unevictable list
+ */
+ SetPageUnevictable(page);
+ list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
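+ /*
+ * The page may have become evictable between the test above
+ * and the SetPageUnevictable(); recheck so a racing munlock()
+ * or munmap() cannot strand it on the unevictable list.
+ */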
+ if (page_evictable(page, NULL))
+ goto retry;
+ }
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping. Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+ pgoff_t next = 0;
+ pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ struct zone *zone;
+ struct pagevec pvec;
+
+ if (mapping->nrpages == 0)
+ return;
+
+ pagevec_init(&pvec, 0);
+ while (next < end &&
+ pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ int i;
+ int pg_scanned = 0;
+
+ zone = NULL;
+
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+ pgoff_t page_index = page->index;
+ struct zone *pagezone = page_zone(page);
+
+ pg_scanned++;
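+ /*
+ * pagevec_lookup() may skip holes in the mapping; keep 'next'
+ * in step with the index actually found.
+ */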
+ if (page_index > next)
+ next = page_index;
+ next++;
+
+ if (pagezone != zone) {
+ if (zone)
+ spin_unlock_irq(&zone->lru_lock);
+ zone = pagezone;
+ spin_lock_irq(&zone->lru_lock);
+ }
+
+ if (PageLRU(page) && PageUnevictable(page))
+ check_move_unevictable_page(page, zone);
+ }
+ if (zone)
+ spin_unlock_irq(&zone->lru_lock);
+ pagevec_release(&pvec);
+
+ count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+ }
+}
+
+/**
+ * scan_zone_unevictable_pages - check unevictable list for evictable pages
+ * @zone: zone whose unevictable list is to be scanned
+ *
+ * Scan @zone's unevictable LRU lists to check for pages that have become
+ * evictable. Move those that have to @zone's inactive list where they
+ * become candidates for reclaim, unless shrink_inactive_zone() decides
+ * to reactivate them. Pages that are still unevictable are rotated
+ * back onto @zone's unevictable list.
+ */
+#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
+void scan_zone_unevictable_pages(struct zone *zone)
+{
+ struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
+ unsigned long scan;
+ unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
+
+ while (nr_to_scan > 0) {
+ unsigned long batch_size = min(nr_to_scan,
+ SCAN_UNEVICTABLE_BATCH_SIZE);
+
+ spin_lock_irq(&zone->lru_lock);
+ for (scan = 0; scan < batch_size; scan++) {
+ struct page *page = lru_to_page(l_unevictable);
+
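+ /*
+ * We hold zone->lru_lock, so we cannot sleep on the page lock;
+ * skip pages that are locked by someone else.
+ */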
+ if (!trylock_page(page))
+ continue;
+
+ prefetchw_prev_lru_page(page, l_unevictable, flags);
+
+ if (likely(PageLRU(page) && PageUnevictable(page)))
+ check_move_unevictable_page(page, zone);
+
+ unlock_page(page);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+
+ nr_to_scan -= batch_size;
+ }
+}
+
+/**
+ * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
+ *
+ * A really big hammer: scan all zones' unevictable LRU lists to check for
+ * pages that have become evictable. Move those back to the zones'
+ * inactive list where they become candidates for reclaim.
+ * This occurs when, e.g., unswappable pages sit on the unevictable lists
+ * and swap is then added to the system. It runs in the context of a task
+ * that has likely just made some previously unevictable pages evictable.
+ */
+void scan_all_zones_unevictable_pages(void)
+{
+ struct zone *zone;
+
+ for_each_zone(zone) {
+ scan_zone_unevictable_pages(zone);
+ }
+}
+
+/*
+ * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
+ * all nodes' unevictable lists for evictable pages
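+ * A write of any non-zero value triggers the rescan, e.g. (assuming the
+ * table entry is registered under the vm sysctl directory):
+ *   echo 1 > /proc/sys/vm/scan_unevictable_pages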
+ */
+unsigned long scan_unevictable_pages;
+
+int scan_unevictable_handler(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer,
+ size_t *length, loff_t *ppos)
+{
+ proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+
+ if (write && *(unsigned long *)table->data)
+ scan_all_zones_unevictable_pages();
+
+ scan_unevictable_pages = 0;
+ return 0;
+}
+
+/*
+ * per node 'scan_unevictable_pages' attribute. On demand re-scan of
+ * a specified node's per zone unevictable lists for evictable pages.
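+ * A write of any non-zero value triggers the rescan for that node, e.g.
+ * (assuming the standard node sysdev location under sysfs):
+ *   echo 1 > /sys/devices/system/node/node<id>/scan_unevictable_pages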
+ */
+
+static ssize_t read_scan_unevictable_node(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0\n"); /* always zero; should fit... */
+}
+
+static ssize_t write_scan_unevictable_node(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
+ struct zone *zone;
+ unsigned long req;
+ int err = strict_strtoul(buf, 10, &req);
+
+ if (err || !req)
+ return 1; /* parse error or zero request: no-op */
+ for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+ if (!populated_zone(zone))
+ continue;
+ scan_zone_unevictable_pages(zone);
+ }
return 1;
}
+
+static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+ read_scan_unevictable_node,
+ write_scan_unevictable_node);
+
+int scan_unevictable_register_node(struct node *node)
+{
+ return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
+void scan_unevictable_unregister_node(struct node *node)
+{
+ sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
#endif