/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>

#include <asm/uaccess.h>
struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};
/*
 * For accounting under irq-disabled context, there is no need to bump the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();

	stat->cpustat[cpu].count[idx] += val;
}
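
/*
 * Sum a statistics counter across all possible CPUs.
 */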
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;

	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
	/*
	 * statistics
	 */
	struct mem_cgroup_stat stat;
};
/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
/*
 * A page_cgroup is associated with every page descriptor. The page_cgroup
 * helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
					/* mapped and cached states     */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */
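
/*
 * Possible values for mem_cgroup->control_type: what the controller
 * accounts (mapped pages, page cache, or both).
 */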
enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};
/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat,
					MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}
static struct mem_cgroup init_mem_cgroup;
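
/*
 * Helpers to map a cgroup or a task to the owning mem_cgroup via the
 * embedded cgroup_subsys_state.
 */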
static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}
static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}
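
/*
 * Called when a new mm_struct is set up: pin the owning task's memory
 * cgroup and cache a pointer to it in the mm.
 */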
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}
void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}
static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}
void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario.
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}
struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}
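
/*
 * Lock/unlock page->page_cgroup using its low bit as a bit spinlock
 * (see PAGE_CGROUP_LOCK above).
 */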
static void __always_inline lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}
/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page has already been tied to a page_cgroup.
 * On success, returns 0.
 */
static int page_cgroup_assign_new_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else	/* The page is tied to another pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}
/*
 * Clear page->page_cgroup under lock_page_cgroup().
 * If the given "pc" differs from the current page->page_cgroup, the member
 * is not cleared.
 * Returns the value of page->page_cgroup at the time the lock was taken,
 * so a caller can detect a successful clear by checking
 *	clear_page_cgroup(page, pc) == pc
 */
static struct page_cgroup *clear_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	struct page_cgroup *ret;

	/* lock and clear */
	lock_page_cgroup(page);
	ret = page_get_page_cgroup(page);
	if (likely(ret == pc))
		page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);
	return ret;
}
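
/*
 * Move a page_cgroup between this cgroup's active and inactive LRU lists.
 * The caller must hold mem_cgroup->lru_lock.
 */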
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	if (active) {
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	} else {
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
	}
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}
/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;

	if (!pc)
		return;

	mem = pc->mem_cgroup;
	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}
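
/*
 * Scan up to @nr_to_scan page_cgroups on this cgroup's active or inactive
 * list, move isolatable pages that belong to zone @z onto @dst and return
 * the number taken; @scanned reports how many entries were examined.
 */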
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			scan++;
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			scan++;
			continue;
		}
		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroup's go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it.
	 */
retry:
	if (page) {
		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		/*
		 * The page_cgroup exists and
		 * the page has already been accounted.
		 */
		if (pc) {
			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
				/* this page is being uncharged ? */
				unlock_page_cgroup(page);
				cpu_relax();
				goto retry;
			} else {
				unlock_page_cgroup(page);
				goto done;
			}
		}
		unlock_page_cgroup(page);
	}

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment reference count
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->cgroup, increment refcnt.... just retry is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		if (!page)
			goto done;
		goto retry;
	}

	spin_lock_irqsave(&mem->lru_lock, flags);
	/* Update statistics vector */
	mem_cgroup_charge_statistics(mem, pc->flags, true);
	list_add(&pc->lru, &mem->active_list);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}
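
/*
 * Charge a page that is (about to be) mapped into an address space (rss).
 */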
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
			MEM_CGROUP_CHARGE_TYPE_MAPPED);
}
/*
 * See if the cached pages should be charged at all?
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	int ret = 0;
	struct mem_cgroup *mem;

	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	css_get(&mem->css);
	rcu_read_unlock();
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
	css_put(&mem->css);
	return ret;
}
/*
 * Uncharging is always a welcome operation; we never complain, we simply
 * uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This can handle cases when a page is not charged at all and we
	 * are switching between handling the control_type.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		/*
		 * get page->cgroup and clear it under lock.
		 * force_empty can drop page->cgroup without checking refcnt.
		 */
		if (clear_page_cgroup(page, pc) == pc) {
			mem = pc->mem_cgroup;
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			spin_lock_irqsave(&mem->lru_lock, flags);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			spin_unlock_irqrestore(&mem->lru_lock, flags);
			kfree(pc);
		}
	}
}
/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;
	int ret = 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}
void mem_cgroup_end_migration(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);

	mem_cgroup_uncharge(pc);
}
/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with uncharge() routines because the page_cgroup
 * for *page* holds an extra reference taken by
 * mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
retry:
	pc = page_get_page_cgroup(page);
	if (!pc)
		return;
	if (clear_page_cgroup(page, pc) != pc)
		goto retry;

	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);
	unlock_page_cgroup(newpage);
}
/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
{
	struct page_cgroup *pc;
	struct page *page;
	int count;
	unsigned long flags;

retry:
	count = FORCE_UNCHARGE_BATCH;
	spin_lock_irqsave(&mem->lru_lock, flags);

	while (--count && !list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		/* Avoid race with charge */
		atomic_set(&pc->ref_cnt, 0);
		if (clear_page_cgroup(page, pc) == pc) {
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			kfree(pc);
		} else	/* being uncharged ? ...do relax */
			break;
	}
	spin_unlock_irqrestore(&mem->lru_lock, flags);
	if (!list_empty(list)) {
		cond_resched();
		goto retry;
	}
}
/*
 * Make the mem_cgroup's charge 0 if there is no task using it.
 * This enables deleting this mem_cgroup.
 */
int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until both lists are empty.
	 */
	while (!(list_empty(&mem->active_list) &&
		 list_empty(&mem->inactive_list))) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* drop all page_cgroup in active_list */
		mem_cgroup_force_empty_list(mem, &mem->active_list);
		/* drop all page_cgroup in inactive_list */
		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}
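
/*
 * Parse a limit written by userspace (memparse accepts K/M/G suffixes) and
 * round the result up to a whole number of pages.
 */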
int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the closest page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
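
/*
 * cgroup file handlers that read and write the underlying res_counter
 * members (usage, limit, failcnt) selected by cft->private.
 */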
static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}
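
/*
 * Accept a decimal control_type value from userspace and switch what the
 * cgroup accounts (RSS only vs. RSS + page cache).
 */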
static ssize_t mem_control_type_write(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			const char __user *userbuf,
			size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;
	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}
static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}
static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret;

	ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}
/*
 * Note: This should be removed if cgroup supports write-only file.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}
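
/* Control files created in each memory cgroup's directory. */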
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
		.read = mem_force_empty_read,
	},
};
static struct mem_cgroup init_mem_cgroup;
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return NULL;

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	return &mem->css;
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	kfree(mem_cgroup_from_cont(cont));
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}
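
/*
 * cgroup attach callback: retarget the task's mm at the destination
 * cgroup's memory controller and drop the reference on the old one.
 */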
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
}
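
/* Registration of the memory controller as a cgroup subsystem. */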
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
};