www.pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - mm/vmscan.c
mm: remove unevictable's show_page_path
[linux-2.6-omap-h63xx.git] / mm / vmscan.c
index ca64e3e0c518965a81bc99516199978e16f6b469..c141b3e780719d314a5010f702190d8a9c56c23b 100644 (file)
@@ -732,7 +732,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (!mapping || !__remove_mapping(mapping, page))
                        goto keep_locked;
 
-               unlock_page(page);
+               /*
+                * At this point, we have no other references and there is
+                * no way to pick any more up (removed from LRU, removed
+                * from pagecache). Can use non-atomic bitops now (and
+                * we obviously don't have to worry about waking up a process
+                * waiting on the page lock, because there are no references.
+                */
+               __clear_page_locked(page);
 free_it:
                nr_reclaimed++;
                if (!pagevec_add(&freed_pvec, page)) {
@@ -1413,16 +1420,13 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                if (scan_global_lru(sc)) {
                        int file = is_file_lru(l);
                        int scan;
-                       /*
-                        * Add one to nr_to_scan just to make sure that the
-                        * kernel will slowly sift through each list.
-                        */
+
                        scan = zone_page_state(zone, NR_LRU_BASE + l);
                        if (priority) {
                                scan >>= priority;
                                scan = (scan * percent[file]) / 100;
                        }
-                       zone->lru[l].nr_scan += scan + 1;
+                       zone->lru[l].nr_scan += scan;
                        nr[l] = zone->lru[l].nr_scan;
                        if (nr[l] >= sc->swap_cluster_max)
                                zone->lru[l].nr_scan = 0;
@@ -2364,39 +2368,6 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
        return 1;
 }
 
-static void show_page_path(struct page *page)
-{
-       char buf[256];
-       if (page_is_file_cache(page)) {
-               struct address_space *mapping = page->mapping;
-               struct dentry *dentry;
-               pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-               spin_lock(&mapping->i_mmap_lock);
-               dentry = d_find_alias(mapping->host);
-               printk(KERN_INFO "rescued: %s %lu\n",
-                      dentry_path(dentry, buf, 256), pgoff);
-               spin_unlock(&mapping->i_mmap_lock);
-       } else {
-#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
-               struct anon_vma *anon_vma;
-               struct vm_area_struct *vma;
-
-               anon_vma = page_lock_anon_vma(page);
-               if (!anon_vma)
-                       return;
-
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-                       printk(KERN_INFO "rescued: anon %s\n",
-                              vma->vm_mm->owner->comm);
-                       break;
-               }
-               page_unlock_anon_vma(anon_vma);
-#endif
-       }
-}
-
-
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -2417,8 +2388,6 @@ retry:
        if (page_evictable(page, NULL)) {
                enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
 
-               show_page_path(page);
-
                __dec_zone_state(zone, NR_UNEVICTABLE);
                list_move(&page->lru, &zone->lru[l].list);
                __inc_zone_state(zone, NR_INACTIVE_ANON + l);