/*
* While reclaiming in a hierarchy, we cache the last child we
- * reclaimed from. Protected by cgroup_lock()
+ * reclaimed from. Protected by hierarchy_mutex
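+ * A reference is held on the cached child (via mem_cgroup_get());
+ * it is dropped when the walk moves past it, or at destroy time.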
*/
struct mem_cgroup *last_scanned_child;
/*
return;
pc = lookup_page_cgroup(page);
+ /*
+ * The Used bit is set without atomic ops, but after smp_wmb().
+ * A paired smp_rmb() is needed here to make pc->mem_cgroup visible.
+ */
smp_rmb();
/* unused page is not rotated. */
if (!PageCgroupUsed(pc))
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
- /* barrier to sync with "charge" */
+ /*
+ * The Used bit is set without atomic ops, but after smp_wmb().
+ * A paired smp_rmb() is needed here to make pc->mem_cgroup visible.
+ */
smp_rmb();
if (!PageCgroupUsed(pc))
return;
return NULL;
pc = lookup_page_cgroup(page);
+ /*
+ * The Used bit is set without atomic ops, but after smp_wmb().
+ * A paired smp_rmb() is needed here to make pc->mem_cgroup visible.
+ */
+ smp_rmb();
+ if (!PageCgroupUsed(pc))
+ return NULL;
+
mz = page_cgroup_zoneinfo(pc);
if (!mz)
return NULL;
/*
* This routine finds the DFS walk successor. It should be
- * called with cgroup_mutex held
+ * called with hierarchy_mutex held
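+ *
+ * For example, with a hierarchy root -> { A -> { A1, A2 }, B }, the
+ * successor walk visits A, A1, A2, B in DFS pre-order and returns
+ * NULL once it arrives back at root.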
*/
static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
{
struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
/*
* Walk down to children
*/
- mem_cgroup_put(curr);
cgroup = list_entry(curr_cgroup->children.next,
struct cgroup, sibling);
curr = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(curr);
goto done;
}
visit_parent:
if (curr_cgroup == root_cgroup) {
- mem_cgroup_put(curr);
- curr = root_mem;
- mem_cgroup_get(curr);
+ /* caller handles NULL case */
+ curr = NULL;
goto done;
}
* Go to the next sibling
*/
if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
- mem_cgroup_put(curr);
cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
sibling);
curr = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(curr);
goto done;
}
goto visit_parent;
done:
- root_mem->last_scanned_child = curr;
return curr;
}
* that to reclaim free pages from.
*/
static struct mem_cgroup *
-mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
{
struct cgroup *cgroup;
- struct mem_cgroup *ret;
+ struct mem_cgroup *orig, *next;
bool obsolete;
- obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
-
/*
* Scan all children under root_mem
*/
- cgroup_lock();
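+ /* hierarchy_mutex keeps the child list stable during the walk */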
+ mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+
+ orig = root_mem->last_scanned_child;
+ obsolete = mem_cgroup_is_obsolete(orig);
+
if (list_empty(&root_mem->css.cgroup->children)) {
- ret = root_mem;
+ /*
+ * root_mem might have had children before, and last_scanned_child
+ * may still point to one of them. Its reference is dropped below.
+ */
+ if (orig)
+ VM_BUG_ON(!obsolete);
+ next = NULL;
goto done;
}
- if (!root_mem->last_scanned_child || obsolete) {
-
- if (obsolete && root_mem->last_scanned_child)
- mem_cgroup_put(root_mem->last_scanned_child);
-
+ if (!orig || obsolete) {
cgroup = list_first_entry(&root_mem->css.cgroup->children,
struct cgroup, sibling);
- ret = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(ret);
+ next = mem_cgroup_from_cont(cgroup);
} else
- ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
- root_mem);
+ next = __mem_cgroup_get_next_node(orig, root_mem);
done:
- root_mem->last_scanned_child = ret;
- cgroup_unlock();
- return ret;
+ if (next)
+ mem_cgroup_get(next);
+ root_mem->last_scanned_child = next;
+ if (orig)
+ mem_cgroup_put(orig);
+ mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
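+ /*
+ * Returning root_mem (rather than NULL) signals the caller that
+ * the walk has wrapped; its while (next_mem != root_mem) loop ends.
+ */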
+ return (next) ? next : root_mem;
}
static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
* but there might be leftover accounting, even after children
* have left.
*/
- ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+ ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
get_swappiness(root_mem));
if (mem_cgroup_check_under_limit(root_mem))
- return 0;
+ return 1; /* indicate reclaim has succeeded */
if (!root_mem->use_hierarchy)
return ret;
- next_mem = mem_cgroup_get_first_node(root_mem);
+ next_mem = mem_cgroup_get_next_node(root_mem);
while (next_mem != root_mem) {
if (mem_cgroup_is_obsolete(next_mem)) {
- mem_cgroup_put(next_mem);
- cgroup_lock();
- next_mem = mem_cgroup_get_first_node(root_mem);
- cgroup_unlock();
+ next_mem = mem_cgroup_get_next_node(root_mem);
continue;
}
- ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+ ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
get_swappiness(next_mem));
if (mem_cgroup_check_under_limit(root_mem))
- return 0;
- cgroup_lock();
- next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
- cgroup_unlock();
+ return 1; /* indicate reclaim has succeeded */
+ next_mem = mem_cgroup_get_next_node(root_mem);
}
return ret;
}
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
noswap);
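+ /*
+ * Nonzero means reclaim made progress (or brought the group
+ * back under its limit), so retry the charge right away.
+ */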
+ if (ret)
+ continue;
/*
* try_to_free_mem_cgroup_pages() might not give us a full
return -ENOMEM;
}
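+
+/*
+ * Look up the memcg recorded for a SwapCache page at swapout time.
+ * On success the caller holds a css reference (from css_tryget())
+ * and must drop it with css_put().
+ */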
+static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+{
+ struct mem_cgroup *mem;
+ swp_entry_t ent;
+
+ if (!PageSwapCache(page))
+ return NULL;
+
+ ent.val = page_private(page);
+ mem = lookup_swap_cgroup(ent);
+ if (!mem)
+ return NULL;
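+ /* the recorded memcg may be going away; css_tryget() guards that */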
+ if (!css_tryget(&mem->css))
+ return NULL;
+ return mem;
+}
+
/*
* Commit a charge obtained by __mem_cgroup_try_charge() and mark the
* page_cgroup USED. If it is already USED, uncharge and return.
if (pc->mem_cgroup != from)
goto out;
- css_put(&from->css);
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
if (do_swap_account)
res_counter_uncharge(&from->memsw, PAGE_SIZE);
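+ /* keep 'from' alive until its counters are fully uncharged */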
+ css_put(&from->css);
+
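+ /* take a reference on 'to' before pc->mem_cgroup can point to it */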
+ css_get(&to->css);
pc->mem_cgroup = to;
mem_cgroup_charge_statistics(to, pc, true);
- css_get(&to->css);
ret = 0;
out:
unlock_page_cgroup(pc);
if (ret || !parent)
return ret;
- if (!get_page_unless_zero(page))
- return -EBUSY;
+ if (!get_page_unless_zero(page)) {
+ ret = -EBUSY;
+ goto uncharge;
+ }
ret = isolate_lru_page(page);
ret = mem_cgroup_move_account(pc, child, parent);
- /* drop extra refcnt by try_charge() (move_account increment one) */
- css_put(&parent->css);
putback_lru_page(page);
if (!ret) {
put_page(page);
+ /* drop the extra refcnt taken by try_charge() */
+ css_put(&parent->css);
return 0;
}
- /* uncharge if move fails */
+
cancel:
+ put_page(page);
+uncharge:
+ /* drop the extra refcnt taken by try_charge() */
+ css_put(&parent->css);
+ /* uncharge if move fails */
res_counter_uncharge(&parent->res, PAGE_SIZE);
if (do_swap_account)
res_counter_uncharge(&parent->memsw, PAGE_SIZE);
- put_page(page);
return ret;
}
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
+ struct mem_cgroup *mem = NULL;
+ int ret;
+
if (mem_cgroup_disabled())
return 0;
if (PageCompound(page))
* For GFP_NOWAIT case, the page may be pre-charged before calling
* add_to_page_cache(). (See shmem.c.) Check for that here to avoid
* charging twice. (It works either way, but at a somewhat larger cost.)
+ * When the page is SwapCache, the swap accounting information must
+ * also be taken into account. This path now runs under lock_page().
*/
if (!(gfp_mask & __GFP_WAIT)) {
struct page_cgroup *pc;
unlock_page_cgroup(pc);
}
- if (unlikely(!mm))
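+ /*
+ * A SwapCache page may already have an owner recorded at swapout
+ * time; if so, charge that memcg (signalled by mm == NULL) rather
+ * than the caller's mm.
+ */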
+ if (do_swap_account && PageSwapCache(page)) {
+ mem = try_get_mem_cgroup_from_swapcache(page);
+ if (mem)
+ mm = NULL;
+ else
+ mem = NULL;
+ /* SwapCache may still be linked to the LRU at this point. */
+ mem_cgroup_lru_del_before_commit_swapcache(page);
+ }
+
+ if (unlikely(!mm && !mem))
mm = &init_mm;
if (page_is_file_cache(page))
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
- else
- return mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+ if (mem)
+ css_put(&mem->css);
+ if (PageSwapCache(page))
+ mem_cgroup_lru_add_after_commit_swapcache(page);
+
+ if (do_swap_account && !ret && PageSwapCache(page)) {
+ swp_entry_t ent = {.val = page_private(page)};
+ /*
+ * The entry was charged to memsw while on swap; clear the
+ * record and uncharge memsw to avoid double counting.
+ */
+ mem = swap_cgroup_record(ent, NULL);
+ if (mem) {
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ mem_cgroup_put(mem);
+ }
+ }
+ return ret;
}
/*
gfp_t mask, struct mem_cgroup **ptr)
{
struct mem_cgroup *mem;
- swp_entry_t ent;
int ret;
if (mem_cgroup_disabled())
if (!do_swap_account)
goto charge_cur_mm;
-
/*
* A racing thread's fault, or swapoff, may have already updated
* the pte, and even removed the page from swap cache: return success
*/
if (!PageSwapCache(page))
return 0;
-
- ent.val = page_private(page);
-
- mem = lookup_swap_cgroup(ent);
+ mem = try_get_mem_cgroup_from_swapcache(page);
if (!mem)
goto charge_cur_mm;
- if (!css_tryget(&mem->css))
- goto charge_cur_mm;
*ptr = mem;
ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
/* drop extra refcnt from tryget */
return __mem_cgroup_try_charge(mm, mask, ptr, true);
}
-#ifdef CONFIG_SWAP
-
-int mem_cgroup_cache_charge_swapin(struct page *page,
- struct mm_struct *mm, gfp_t mask, bool locked)
-{
- int ret = 0;
-
- if (mem_cgroup_disabled())
- return 0;
- if (unlikely(!mm))
- mm = &init_mm;
- if (!locked)
- lock_page(page);
- /*
- * If not locked, the page can be dropped from SwapCache until
- * we reach here.
- */
- if (PageSwapCache(page)) {
- struct mem_cgroup *mem = NULL;
- swp_entry_t ent;
-
- ent.val = page_private(page);
- if (do_swap_account) {
- mem = lookup_swap_cgroup(ent);
- if (mem) {
- if (css_tryget(&mem->css))
- mm = NULL; /* charge to recorded */
- else
- mem = NULL; /* charge to current */
- }
- }
- /* SwapCache may be still linked to LRU now. */
- mem_cgroup_lru_del_before_commit_swapcache(page);
- ret = mem_cgroup_charge_common(page, mm, mask,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- mem_cgroup_lru_add_after_commit_swapcache(page);
- /* drop extra refcnt from tryget */
- if (mem)
- css_put(&mem->css);
-
- if (!ret && do_swap_account) {
- /* avoid double counting */
- mem = swap_cgroup_record(ent, NULL);
- if (mem) {
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- mem_cgroup_put(mem);
- }
- }
- }
- if (!locked)
- unlock_page(page);
-
- return ret;
-}
-#endif
-
void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
struct page_cgroup *pc;
* This is typically used when reclaiming pages for shmem, to reduce the
* side effects of shmem page allocation on the mem_cgroups that use it.
*/
-int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_shrink_usage(struct page *page,
+ struct mm_struct *mm,
+ gfp_t gfp_mask)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem = NULL;
int progress = 0;
int retry = MEM_CGROUP_RECLAIM_RETRIES;
if (mem_cgroup_disabled())
return 0;
- if (!mm)
- return 0;
-
- mem = try_get_mem_cgroup_from_mm(mm);
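+ /*
+ * Prefer the memcg recorded for the SwapCache page; fall back
+ * to the mm's memcg when the page carries no record.
+ */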
+ if (page)
+ mem = try_get_mem_cgroup_from_swapcache(page);
+ if (!mem && mm)
+ mem = try_get_mem_cgroup_from_mm(mm);
if (unlikely(!mem))
return 0;
}
#endif
-static struct cgroup_subsys_state *
+static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem, *parent;
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
- mem_cgroup_put(mem_cgroup_from_cont(cont));
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
+
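+ /* drop the walk reference taken by mem_cgroup_get_next_node() */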
+ if (last_scanned_child) {
+ VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
+ mem_cgroup_put(last_scanned_child);
+ }
+ mem_cgroup_put(mem);
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,