struct mem_cgroup_per_zone *mz;
unsigned long flags;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
/*
* We cannot lock_page_cgroup while holding zone's lru_lock,
* because other holders of lock_page_cgroup can be interrupted
unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup_per_zone *mz;
- if (mem_cgroup_subsys.disabled)
- return 0;
-
- /*
- * Should page_cgroup's go to their own slab?
- * One could optimize the performance of the charging routine
- * by saving a bit in the page_flags and using it as a lock
- * to see if the cgroup page already has a page_cgroup associated
- * with it
- */
-retry:
- lock_page_cgroup(page);
- pc = page_get_page_cgroup(page);
- /*
- * The page_cgroup exists and
- * the page has already been accounted.
- */
- if (pc) {
- VM_BUG_ON(pc->page != page);
- VM_BUG_ON(!pc->mem_cgroup);
- unlock_page_cgroup(page);
- goto done;
- }
- unlock_page_cgroup(page);
-
pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
- if (pc == NULL)
+ if (unlikely(pc == NULL))
goto err;
/*
pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
lock_page_cgroup(page);
- if (page_get_page_cgroup(page)) {
+ if (unlikely(page_get_page_cgroup(page))) {
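+ /*
+ * Another charge raced in while we were allocating pc: the page is
+ * already accounted, so drop our reservation and the unused pc and
+ * report success.
+ */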
unlock_page_cgroup(page);
- /*
- * Another charge has been added to this page already.
- * We take lock_page_cgroup(page) again and read
- * page->cgroup, increment refcnt.... just retry is OK.
- */
res_counter_uncharge(&mem->res, PAGE_SIZE);
css_put(&mem->css);
kmem_cache_free(page_cgroup_cache, pc);
- goto retry;
+ goto done;
}
page_assign_page_cgroup(page, pc);
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
/*
* If already mapped, we don't have to account.
* If page cache, page->mapping has address_space.
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
+ /*
+ * Corner case handling. This is usually called from add_to_page_cache(),
+ * but some filesystems (shmem) pre-charge the page and then call
+ * add_to_page_cache() with GFP_NOWAIT.
+ *
+ * So in the GFP_NOWAIT case the page may already be charged (see shmem.c);
+ * check for that here and avoid charging it twice. (This works, but costs
+ * an extra lock/unlock of the page_cgroup.)
+ */
+ if (!(gfp_mask & __GFP_WAIT)) {
+ struct page_cgroup *pc;
+
+ lock_page_cgroup(page);
+ pc = page_get_page_cgroup(page);
+ if (pc) {
+ VM_BUG_ON(pc->page != page);
+ VM_BUG_ON(!pc->mem_cgroup);
+ unlock_page_cgroup(page);
+ return 0;
+ }
+ unlock_page_cgroup(page);
+ }
+
if (unlikely(!mm))
mm = &init_mm;
+
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}
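For context, here is a minimal sketch of the pre-charge pattern the comment above refers to, assuming a shmem-style caller; the helper name and error handling are illustrative only and not part of this patch.

/*
 * Illustrative sketch, not from this patch.
 * Assumes <linux/pagemap.h> and <linux/memcontrol.h>.
 */
static int example_add_precharged_page(struct page *page,
				       struct address_space *mapping,
				       pgoff_t index)
{
	int err;

	/* Charge up front with a gfp mask that is allowed to sleep/reclaim. */
	err = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (err)
		return err;

	/*
	 * add_to_page_cache() charges again internally; with GFP_NOWAIT the
	 * check added above finds the existing page_cgroup and returns 0
	 * instead of charging twice.
	 */
	err = add_to_page_cache(page, mapping, index, GFP_NOWAIT);
	if (err)
		mem_cgroup_uncharge_page(page);
	return err;
}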
*/
lock_page_cgroup(page);
pc = page_get_page_cgroup(page);
- if (!pc)
+ if (unlikely(!pc))
goto unlock;
VM_BUG_ON(pc->page != page);
mem_cgroup_uncharge_page(newpage);
}
+/*
+ * Try to shrink memory usage under the specified resource controller.
+ * This is typically used by shmem to reclaim pages, reducing the side
+ * effects of its page allocations on the mem_cgroups that use it.
+ */
+int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+{
+ struct mem_cgroup *mem;
+ int progress = 0;
+ int retry = MEM_CGROUP_RECLAIM_RETRIES;
+
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+ if (!mm)
+ return 0;
+
+ rcu_read_lock();
+ mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ css_get(&mem->css);
+ rcu_read_unlock();
+
+ do {
+ progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+ } while (!progress && --retry);
+
+ css_put(&mem->css);
+ if (!retry)
+ return -ENOMEM;
+ return 0;
+}
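A rough sketch, also not part of the patch, of how a shmem-style caller might pair mem_cgroup_shrink_usage() with a GFP_NOWAIT charge: try the non-blocking charge first and, on -ENOMEM, ask the owning cgroup to reclaim before retrying once. The wrapper name below is hypothetical.

/* Illustrative sketch, not from this patch. */
static int example_charge_with_shrink(struct page *page, struct mm_struct *mm,
				      gfp_t gfp_mask)
{
	int err;

	err = mem_cgroup_cache_charge(page, mm, gfp_mask & ~__GFP_WAIT);
	if (err != -ENOMEM)
		return err;

	/* Reclaim from the mm owner's cgroup, then try the charge once more. */
	err = mem_cgroup_shrink_usage(mm, gfp_mask);
	if (err)
		return err;

	return mem_cgroup_cache_charge(page, mm, gfp_mask & ~__GFP_WAIT);
}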
+
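+/*
+ * Shrink usage until the new limit fits: res_counter_set_limit() fails while
+ * current usage is still above 'val', so reclaim from this group and retry.
+ * Give up after MEM_CGROUP_RECLAIM_RETRIES reclaim passes that make no
+ * progress, or when a signal is pending.
+ */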
+int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
+{
+ int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int progress;
+ int ret = 0;
+
+ while (res_counter_set_limit(&memcg->res, val)) {
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ if (!retry_count) {
+ ret = -EBUSY;
+ break;
+ }
+ progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+ if (!progress)
+ retry_count--;
+ }
+ return ret;
+}
+
/*
* This routine traverse page_cgroup in given list and drop them all.
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
int ret = -EBUSY;
int node, zid;
- if (mem_cgroup_subsys.disabled)
- return 0;
-
css_get(&mem->css);
/*
* page reclaim code (kswapd etc..) will move pages between
return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
cft->private);
}
-
+/*
+ * So far, the only user of this function is RES_LIMIT.
+ */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
const char *buffer)
{
- return res_counter_write(&mem_cgroup_from_cont(cont)->res,
- cft->private, buffer,
- res_counter_memparse_write_strategy);
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ unsigned long long val;
+ int ret;
+
+ switch (cft->private) {
+ case RES_LIMIT:
+ /* This helper does all the necessary parsing; reuse it */
+ ret = res_counter_memparse_write_strategy(buffer, &val);
+ if (!ret)
+ ret = mem_cgroup_resize_limit(memcg, val);
+ break;
+ default:
+ ret = -EINVAL; /* should be BUG() ? */
+ break;
+ }
+ return ret;
}
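For orientation, this handler is reached through the memory controller's cftype table, which is not shown in this hunk. The registration presumably looks roughly like the sketch below; apart from RES_LIMIT and mem_cgroup_write(), the field values are assumptions about the surrounding file. A write such as "64M" to memory.limit_in_bytes then lands here, is parsed by res_counter_memparse_write_strategy(), and ends up in mem_cgroup_resize_limit().

/* Assumed shape of the entry in mem_cgroup_files[]; not part of this hunk. */
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
	},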
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
static int mem_cgroup_populate(struct cgroup_subsys *ss,
struct cgroup *cont)
{
- if (mem_cgroup_subsys.disabled)
- return 0;
return cgroup_add_files(cont, ss, mem_cgroup_files,
ARRAY_SIZE(mem_cgroup_files));
}
struct mm_struct *mm;
struct mem_cgroup *mem, *old_mem;
- if (mem_cgroup_subsys.disabled)
- return;
-
mm = get_task_mm(p);
if (mm == NULL)
return;
mem = mem_cgroup_from_cont(cont);
old_mem = mem_cgroup_from_cont(old_cont);
- if (mem == old_mem)
- goto out;
-
/*
* Only thread group leaders are allowed to migrate, the mm_struct is
* in effect owned by the leader