[PATCH] Direct Migration V9: migrate_pages() extension
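This patch teaches migrate_pages() to move pages directly onto preallocated
target pages instead of pushing everything out via swap, and adds the helpers
migrate_page_remove_references(), migrate_page_copy() and migrate_page() that
do the actual work. As a rough sketch of the interfaces involved (the extern
declarations would presumably live in a companion header change that is not
part of this blobdiff), the relevant prototypes are:

	int  migrate_page(struct page *newpage, struct page *page);
	void migrate_page_copy(struct page *newpage, struct page *page);
	int  migrate_pages(struct list_head *from, struct list_head *to,
			   struct list_head *moved, struct list_head *failed);

Passing a NULL "to" list preserves the old behaviour of swapping the pages out.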
diff --git a/mm/vmscan.c b/mm/vmscan.c
index aa4b80dbe3ad6023d3d705cbad21ea73f69023ec..8f326ce2b690bf63953dde56bf365773fad52c21 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -483,7 +483,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                        if (!sc->may_swap)
                                goto keep_locked;
 
-                       switch (try_to_unmap(page)) {
+                       switch (try_to_unmap(page, 0)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
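The hunk above and the one below only adapt existing callers to a changed
try_to_unmap() interface; the rmap side of the series, which is not part of
this blobdiff, gives it a second "migration" argument. Assuming that companion
change, the prototype is roughly:

	int try_to_unmap(struct page *page, int migration);

Existing callers pass 0 to keep the old behaviour, while
migrate_page_remove_references() further down passes 1 so that anonymous pages
get swap ptes installed instead of simply being unmapped.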
@@ -623,7 +623,7 @@ static int swap_page(struct page *page)
        struct address_space *mapping = page_mapping(page);
 
        if (page_mapped(page) && mapping)
-               if (try_to_unmap(page) != SWAP_SUCCESS)
+               if (try_to_unmap(page, 0) != SWAP_SUCCESS)
                        goto unlock_retry;
 
        if (PageDirty(page)) {
@@ -659,6 +659,154 @@ unlock_retry:
 retry:
        return -EAGAIN;
 }
+
+/*
+ * Page migration was first developed in the context of the memory hotplug
+ * project. The main authors of the migration code are:
+ *
+ * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
+ * Hirokazu Takahashi <taka@valinux.co.jp>
+ * Dave Hansen <haveblue@us.ibm.com>
+ * Christoph Lameter <clameter@sgi.com>
+ */
+
+/*
+ * Remove references for a page and establish the new page with the correct
+ * basic settings to be able to stop accesses to the page.
+ */
+static int migrate_page_remove_references(struct page *newpage,
+                               struct page *page, int nr_refs)
+{
+       struct address_space *mapping = page_mapping(page);
+       struct page **radix_pointer;
+
+       /*
+        * Avoid doing any of the following work if the page count
+        * indicates that the page is in use or truncate has removed
+        * the page.
+        */
+       if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
+               return 1;
+
+       /*
+        * Establish swap ptes for anonymous pages or destroy pte
+        * maps for files.
+        *
+        * In order to reestablish file backed mappings the fault handlers
+        * will take the radix tree_lock which may then be used to stop
+        * processes from accessing this page until the new page is ready.
+        *
+        * A process accessing via a swap pte (an anonymous page) will take a
+        * page_lock on the old page which will block the process until the
+        * migration attempt is complete. At that time the PageSwapCache bit
+        * will be examined. If the page was migrated then the PageSwapCache
+        * bit will be clear and the operation to retrieve the page will be
+        * retried which will find the new page in the radix tree. Then a new
+        * direct mapping may be generated based on the radix tree contents.
+        *
+        * If the page was not migrated then the PageSwapCache bit
+        * is still set and the operation may continue.
+        */
+       try_to_unmap(page, 1);
+
+       /*
+        * Give up if we were unable to remove all mappings.
+        */
+       if (page_mapcount(page))
+               return 1;
+
+       write_lock_irq(&mapping->tree_lock);
+
+       radix_pointer = (struct page **)radix_tree_lookup_slot(
+                                               &mapping->page_tree,
+                                               page_index(page));
+
+       if (!page_mapping(page) || page_count(page) != nr_refs ||
+                       *radix_pointer != page) {
+               write_unlock_irq(&mapping->tree_lock);
+               return 1;
+       }
+
+       /*
+        * Now we know that no one else is looking at the page.
+        *
+        * Certain minimal information about a page must be available
+        * in order for other subsystems to properly handle the page if they
+        * find it through the radix tree update before we are finished
+        * copying the page.
+        */
+       get_page(newpage);
+       newpage->index = page->index;
+       newpage->mapping = page->mapping;
+       if (PageSwapCache(page)) {
+               SetPageSwapCache(newpage);
+               set_page_private(newpage, page_private(page));
+       }
+
+       *radix_pointer = newpage;
+       __put_page(page);
+       write_unlock_irq(&mapping->tree_lock);
+
+       return 0;
+}
+
+/*
+ * Copy the page to its new location
+ */
+void migrate_page_copy(struct page *newpage, struct page *page)
+{
+       copy_highpage(newpage, page);
+
+       if (PageError(page))
+               SetPageError(newpage);
+       if (PageReferenced(page))
+               SetPageReferenced(newpage);
+       if (PageUptodate(page))
+               SetPageUptodate(newpage);
+       if (PageActive(page))
+               SetPageActive(newpage);
+       if (PageChecked(page))
+               SetPageChecked(newpage);
+       if (PageMappedToDisk(page))
+               SetPageMappedToDisk(newpage);
+
+       if (PageDirty(page)) {
+               clear_page_dirty_for_io(page);
+               set_page_dirty(newpage);
+       }
+
+       ClearPageSwapCache(page);
+       ClearPageActive(page);
+       ClearPagePrivate(page);
+       set_page_private(page, 0);
+       page->mapping = NULL;
+
+       /*
+        * If any waiters have accumulated on the new page then
+        * wake them up.
+        */
+       if (PageWriteback(newpage))
+               end_page_writeback(newpage);
+}
+
+/*
+ * Common logic to directly migrate a single page suitable for
+ * pages that do not use PagePrivate.
+ *
+ * Pages are locked upon entry and exit.
+ */
+int migrate_page(struct page *newpage, struct page *page)
+{
+       BUG_ON(PageWriteback(page));    /* Writeback must be complete */
+
+       if (migrate_page_remove_references(newpage, page, 2))
+               return -EAGAIN;
+
+       migrate_page_copy(newpage, page);
+
+       return 0;
+}
+
 /*
  * migrate_pages
  *
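For orientation, migrate_page() is only called from migrate_pages() below, but
its calling convention is simple: both pages locked, writeback on the old page
already finished, and the caller holding one reference on the old page in
addition to the page cache reference (which is what the nr_refs == 2 check
encodes). A purely illustrative caller, with a hypothetical name and not part
of the patch, would look roughly like this:

	/* illustrative sketch only -- not part of the patch */
	static int move_one_page(struct page *old, struct page *new)
	{
		int rc;

		lock_page(old);
		lock_page(new);
		wait_on_page_writeback(old);	/* migrate_page() BUGs on writeback */
		rc = migrate_page(new, old);	/* 0 on success, -EAGAIN to retry */
		unlock_page(new);
		unlock_page(old);
		return rc;
	}

On success the old page has been disconnected from its mapping and the new
page has taken over its radix tree slot, so the caller still has to put the
new page back on the LRU and drop its reference on the old one, as
migrate_pages() does below.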
@@ -672,11 +820,6 @@ retry:
  * are movable anymore because "to" has become empty
  * or no retryable pages exist anymore.
  *
- * SIMPLIFIED VERSION: This implementation of migrate_pages
- * is only swapping out pages and never touches the second
- * list. The direct migration patchset
- * extends this function to avoid the use of swap.
- *
  * Return: Number of pages not migrated when "to" ran empty.
  */
 int migrate_pages(struct list_head *from, struct list_head *to,
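With the simplified-version note gone, migrate_pages() now really consumes the
second list. A minimal, purely illustrative calling pattern (the setup code is
only hinted at and is not part of the patch): pages isolated from the LRU go
on "from", freshly allocated pages go on "to", and the function drains "to" as
it migrates:

	LIST_HEAD(from);
	LIST_HEAD(to);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int nr_failed;

	/*
	 * ... fill "from" (e.g. via isolate_lru_page()) and "to" with
	 * freshly allocated, unused pages before calling ...
	 */

	nr_failed = migrate_pages(&from, &to, &moved, &failed);
	/*
	 * Anything still on "from" or moved to "failed" was not migrated;
	 * passing a NULL "to" falls back to swapping the pages out.
	 */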
@@ -697,6 +840,9 @@ redo:
        retry = 0;
 
        list_for_each_entry_safe(page, page2, from, lru) {
+               struct page *newpage = NULL;
+               struct address_space *mapping;
+
                cond_resched();
 
                rc = 0;
@@ -704,6 +850,9 @@ redo:
                        /* page was freed from under us. So we are done. */
                        goto next;
 
+               if (to && list_empty(to))
+                       break;
+
                /*
                 * Skip locked pages during the first two passes to give the
                 * functions holding the lock time to release the page. Later we
@@ -740,12 +889,64 @@ redo:
                        }
                }
 
+               if (!to) {
+                       rc = swap_page(page);
+                       goto next;
+               }
+
+               newpage = lru_to_page(to);
+               lock_page(newpage);
+
                /*
-                * Page is properly locked and writeback is complete.
+                * Pages are properly locked and writeback is complete.
                 * Try to migrate the page.
                 */
-               rc = swap_page(page);
-               goto next;
+               mapping = page_mapping(page);
+               if (!mapping)
+                       goto unlock_both;
+
+               /*
+                * Trigger writeout if page is dirty
+                */
+               if (PageDirty(page)) {
+                       switch (pageout(page, mapping)) {
+                       case PAGE_KEEP:
+                       case PAGE_ACTIVATE:
+                               goto unlock_both;
+
+                       case PAGE_SUCCESS:
+                               unlock_page(newpage);
+                               goto next;
+
+                       case PAGE_CLEAN:
+                               ; /* try to migrate the page below */
+                       }
+               }
+               /*
+                * If we have no buffer or can release the buffer
+                * then do a simple migration.
+                */
+               if (!page_has_buffers(page) ||
+                   try_to_release_page(page, GFP_KERNEL)) {
+                       rc = migrate_page(newpage, page);
+                       goto unlock_both;
+               }
+
+               /*
+                * On early passes with mapped pages simply
+                * retry. There may be a lock held for some
+                * buffers that may go away. Later
+                * swap them out.
+                */
+               if (pass > 4) {
+                       unlock_page(newpage);
+                       newpage = NULL;
+                       rc = swap_page(page);
+                       goto next;
+               }
+
+unlock_both:
+               unlock_page(newpage);
 
 unlock_page:
                unlock_page(page);
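One subtlety in the PAGE_SUCCESS branch above is easy to miss, so a note that
is not part of the patch:

	/*
	 * Not part of the patch: on PAGE_SUCCESS, pageout() has already handed
	 * the old page to the filesystem's ->writepage(), which returns with
	 * that page unlocked, so only newpage needs unlocking before jumping
	 * to the "next:" label; the unused newpage is handled there.
	 */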
@@ -758,7 +959,10 @@ next:
                        list_move(&page->lru, failed);
                        nr_failed++;
                } else {
-                       /* Success */
+                       if (newpage) {
+                               /* Successful migration. Return page to LRU */
+                               move_to_lru(newpage);
+                       }
                        list_move(&page->lru, moved);
                }
        }