The current kswapd (and try_to_free_pages) code has an oddity where the
code will wait on IO, even if there is no IO in flight. This problem is
especially noticeable when the system scans through many unfreeable pages,
causing unnecessary stalls in the VM.

Additionally, tasks without __GFP_FS or __GFP_IO in the direct reclaim path
will sleep if they encounter a significant number of pages that should be
written out.  This gives kswapd a chance to write out those pages while the
direct reclaim task sleeps.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
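
For orientation before the diff: the sketch below restates the new nap
condition in isolation.  It is a minimal illustrative sketch, not kernel
code; scan_state, io_pending, SCAN_BATCH and should_wait_for_io are
made-up names standing in for scan_control, nr_io_pages and
swap_cluster_max.

/*
 * Minimal sketch of the new nap condition, outside the kernel.  The
 * names scan_state, io_pending, SCAN_BATCH and should_wait_for_io are
 * illustrative stand-ins for scan_control, nr_io_pages and
 * swap_cluster_max; they are not kernel symbols.
 */
#include <stdbool.h>

#define DEF_PRIORITY	12
#define SCAN_BATCH	32	/* stand-in for sc->swap_cluster_max */

struct scan_state {
	unsigned long scanned;		/* pages looked at this pass */
	unsigned long io_pending;	/* pages found with IO pending */
};

/*
 * Old behaviour: nap whenever the scan was deep enough, even with no
 * IO in flight.  New behaviour: only nap when this pass ran into more
 * IO-bound pages than one reclaim batch, so there is something worth
 * waiting for.
 */
static bool should_wait_for_io(const struct scan_state *s, int priority)
{
	return s->scanned &&
	       priority < DEF_PRIORITY - 2 &&
	       s->io_pending > SCAN_BATCH;
}
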
+ /*
+ * Pages that have (or should have) IO pending. If we run into
+ * a lot of these, we're better off waiting a little for IO to
+ * finish rather than scanning more pages in the VM.
+ */
+ int nr_io_pages;
+
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
*/
if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
wait_on_page_writeback(page);
-		else
+		else {
+			sc->nr_io_pages++;
			goto keep_locked;
+		}
	}
referenced = page_referenced(page, 1, sc->mem_cgroup);
if (PageDirty(page)) {
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
goto keep_locked;
-		if (!may_enter_fs)
+		if (!may_enter_fs) {
+			sc->nr_io_pages++;
			goto keep_locked;
+		}
if (!sc->may_writepage)
goto keep_locked;
case PAGE_ACTIVATE:
goto activate_locked;
case PAGE_SUCCESS:
-			if (PageWriteback(page) || PageDirty(page))
+			if (PageWriteback(page) || PageDirty(page)) {
+				sc->nr_io_pages++;
				goto keep;
+			}
/*
* A synchronous write - probably a ramdisk. Go
* ahead and try to reclaim the page.
	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
+		sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
nr_reclaimed += shrink_zones(priority, zones, sc);
}
/* Take a nap, wait for some writeback to complete */
- if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+ if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
+ sc->nr_io_pages > sc->swap_cluster_max)
congestion_wait(WRITE, HZ/10);
}
/* top priority shrink_caches still had more to do? don't OOM, then */
	if (!priority)
		disable_swap_token();
+	sc.nr_io_pages = 0;
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2)
+ if (total_scanned && priority < DEF_PRIORITY - 2 &&
+ sc.nr_io_pages > sc.swap_cluster_max)
congestion_wait(WRITE, HZ/10);
/*