/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
+ /*
+ * Nodemask of nodes allowed by the caller. If NULL, all nodes
+ * are scanned.
+ */
+ nodemask_t *nodemask;
+
/* Pluggable isolate pages callback */
unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
unsigned long *scanned, int order, int mode,
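The new field is opt-in: a caller with no placement constraint passes NULL and keeps the old scan-all-nodes behaviour. A hypothetical call-site sketch, assuming the usual <linux/nodemask.h> helpers and locals (zonelist, order, gfp_mask) already in scope:

	nodemask_t nodes;

	/* Restrict direct reclaim to the node we are running on. */
	nodes_clear(nodes);
	node_set(numa_node_id(), nodes);
	try_to_free_pages(zonelist, order, gfp_mask, &nodes);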
do_div(delta, lru_pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0) {
- printk(KERN_ERR "%s: nr=%ld\n",
- __func__, shrinker->nr);
+ printk(KERN_ERR "shrink_slab: %pF negative objects to "
+ "delete nr=%ld\n",
+ shrinker->shrink, shrinker->nr);
shrinker->nr = max_pass;
}
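%pF is the printk extension that resolves a function pointer through kallsyms, so the warning names the buggy shrinker callback rather than the unhelpful __func__ of shrink_slab() itself. With a hypothetical inode-cache shrinker the line would come out roughly as:

	shrink_slab: shrink_icache_memory+0x0/0xe0 negative objects to delete nr=-128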
static inline int is_page_cache_freeable(struct page *page)
{
- return page_count(page) - !!PagePrivate(page) == 2;
+ return page_count(page) - !!page_has_private(page) == 2;
}
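PagePrivate() tests only PG_private; page_has_private() also covers PG_private_2, which fscache uses to mark pages carrying cache state, so both flavours of private data now count as a reference to subtract. For reference, the helper was introduced as roughly:

	#define PAGE_FLAGS_PRIVATE \
		(1 << PG_private | 1 << PG_private_2)

	static inline int page_has_private(struct page *page)
	{
		return !!(page->flags & PAGE_FLAGS_PRIVATE);
	}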
static int may_write_to_queue(struct backing_dev_info *bdi)
* Some data journaling orphaned pages can have
* page->mapping == NULL while being dirty with clean buffers.
*/
- if (PagePrivate(page)) {
+ if (page_has_private(page)) {
if (try_to_free_buffers(page)) {
ClearPageDirty(page);
printk("%s: orphaned page\n", __func__);
* process address space (page_count == 1) it can be freed.
* Otherwise, leave the page on the LRU so it is swappable.
*/
- if (PagePrivate(page)) {
+ if (page_has_private(page)) {
if (!try_to_release_page(page, sc->gfp_mask))
goto activate_locked;
if (!mapping && page_count(page) == 1) {
}
__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
pgdeactivate += pgmoved;
- if (buffer_heads_over_limit) {
- spin_unlock_irq(&zone->lru_lock);
- pagevec_strip(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
__count_zone_vm_events(PGREFILL, zone, pgscanned);
__count_vm_events(PGDEACTIVATE, pgdeactivate);
spin_unlock_irq(&zone->lru_lock);
- if (vm_swap_full())
- pagevec_swap_free(&pvec);
-
+ if (buffer_heads_over_limit)
+ pagevec_strip(&pvec);
pagevec_release(&pvec);
}
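Nothing in pagevec_strip() needs zone->lru_lock: it only trylocks individual pages and asks the filesystem to drop their buffer heads, so calling it after the unlock shortens the lock hold time instead of bouncing the lock around it. The vm_swap_full()/pagevec_swap_free() step is dropped entirely, not relocated. A sketch of the contemporaneous pagevec_strip(), assuming the page_has_private() helper above:

	void pagevec_strip(struct pagevec *pvec)
	{
		int i;

		for (i = 0; i < pagevec_count(pvec); i++) {
			struct page *page = pvec->pages[i];

			/* Only touch pages we can lock without sleeping. */
			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}
	}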
struct zone *zone;
sc->all_unreclaimable = 1;
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+ sc->nodemask) {
if (!populated_zone(zone))
continue;
/*
}
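The _nodemask iterator variant simply skips zones on nodes the caller masked out; a NULL sc->nodemask lets every zone through. Its effect is equivalent to the hand-rolled filter below (a sketch, not the real macro):

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (sc->nodemask &&
		    !node_isset(zone_to_nid(zone), *sc->nodemask))
			continue;	/* node excluded by the caller */
		/* ... scan this zone ... */
	}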
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, nodemask_t *nodemask)
{
struct scan_control sc = {
.gfp_mask = gfp_mask,
.order = order,
.mem_cgroup = NULL,
.isolate_pages = isolate_pages_global,
+ .nodemask = nodemask,
};
return do_try_to_free_pages(zonelist, &sc);
.order = 0,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
+ .nodemask = NULL, /* we don't care about placement */
};
struct zonelist *zonelist;
struct reclaim_state reclaim_state = {
.reclaimed_slab = 0,
};
- node_to_cpumask_ptr(cpumask, pgdat->node_id);
+ const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
lockdep_set_current_reclaim_state(GFP_KERNEL);
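node_to_cpumask_ptr() was a macro that declared and initialized a local variable in one step, potentially placing a cpumask on the stack; cpumask_of_node() is an ordinary expression returning a pointer to a precomputed, read-only mask, so large-NR_CPUS builds avoid the stack copy. Typical use in kswapd(), sketched:

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	/* Bind kswapd to its node's CPUs unless the node has none. */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);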
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
for_each_node_state(nid, N_HIGH_MEMORY) {
pg_data_t *pgdat = NODE_DATA(nid);
- node_to_cpumask_ptr(mask, pgdat->node_id);
+ const struct cpumask *mask;
+
+ mask = cpumask_of_node(pgdat->node_id);
if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
/* One of our CPUs online: restore mask */