/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;
void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
/*
 * add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int add_to_swap_cache(struct page *page, swp_entry_t entry,
			     gfp_t gfp_mask)
{
	int error;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
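/*
 * Illustrative note, not part of this file: given the keying above, a
 * swap cache page can be mapped back to its swap entry like this (the
 * helper name is hypothetical):
 *
 *	static swp_entry_t page_swp_entry(struct page *page)
 *	{
 *		swp_entry_t entry;
 *		entry.val = page_private(page);
 *		return entry;
 *	}
 *
 * and the reverse lookup is simply
 * find_get_page(&swapper_space, entry.val).
 */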
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;
		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 *
		 * Add it to the swap cache and mark it dirty.
		 */
		err = add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
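/*
 * Illustrative sketch, not part of this file: the shape of a typical
 * caller on the reclaim path.  add_to_swap() is the function above;
 * the surrounding labels and logic are simplified and hypothetical.
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;	(no swap slot available)
 *	}
 *	(the page is now PageDirty and PageSwapCache, so pageout() can
 *	 write it to its swap slot via swap_writepage)
 */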
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}
/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
	}
	return err;
}
/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);
	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
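/*
 * Illustrative sketch, not part of this file: the usual fault-path
 * pattern (as in do_swap_page), simplified, with locking and error
 * handling omitted.
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *	if (!page)
 *		(the entry was freed, or the allocation failed)
 *	(a returned page carries the extra reference from find_get_page)
 */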
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		if (!swap_duplicate(entry))
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		SetPageLocked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
		ClearPageLocked(new_page);
		swap_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
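/*
 * Illustrative example, not from this file: with the usual page_cluster
 * value of 3, valid_swaphandles() describes a window of up to
 * 1 << 3 = 8 slots aligned on an 8-slot boundary, so a fault on swap
 * offset 21 typically prefetches offsets 16..23 (the window is clipped
 * at the first free or bad slot and at the end of the swap area), and
 * the faulting entry itself is requested again by the final
 * read_swap_cache_async() call above.
 */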