/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
        s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
        struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to increment the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
                enum mem_cgroup_stat_index idx, int val)
{
        int cpu = smp_processor_id();
        stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
                enum mem_cgroup_stat_index idx)
{
        int cpu;
        s64 ret = 0;

        for_each_possible_cpu(cpu)
                ret += stat->cpustat[cpu].count[idx];
        return ret;
}

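/*
 * Note that the read side above takes no locks: the per-cpu counters are
 * simply summed, so a concurrent update on another CPU can be missed and
 * the result is only an approximate snapshot. That is acceptable for
 * statistics reporting.
 */
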
/*
 * per-zone information in memory controller.
 */
enum mem_cgroup_zstat_index {
        MEM_CGROUP_ZSTAT_ACTIVE,
        MEM_CGROUP_ZSTAT_INACTIVE,

        NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
        /*
         * spin_lock to protect the per cgroup LRU
         */
        spinlock_t		lru_lock;
        struct list_head	active_list;
        struct list_head	inactive_list;
        unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
        struct mem_cgroup_lru_info info;

        int	prev_priority;	/* for recording reclaim priority */
        /*
         * statistics
         */
        struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin). But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
        struct list_head lru;		/* per cgroup LRU list */
        struct page *page;
        struct mem_cgroup *mem_cgroup;
        int ref_cnt;			/* cached, mapped, migrating */
        int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
        return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
        return page_zonenum(pc->page);
}

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
                                        bool charge)
{
        int val = (charge) ? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;

        VM_BUG_ON(!irqs_disabled());
        if (flags & PAGE_CGROUP_FLAG_CACHE)
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
        else
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

        if (charge)
                __mem_cgroup_stat_add_safe(stat,
                                MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
        else
                __mem_cgroup_stat_add_safe(stat,
                                MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
        return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
        struct mem_cgroup *mem = pc->mem_cgroup;
        int nid = page_cgroup_nid(pc);
        int zid = page_cgroup_zid(pc);

        return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
                                        enum mem_cgroup_zstat_index idx)
{
        int nid, zid;
        struct mem_cgroup_per_zone *mz;
        u64 total = 0;

        for_each_online_node(nid)
                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                        mz = mem_cgroup_zoneinfo(mem, nid, zid);
                        total += MEM_CGROUP_ZSTAT(mz, idx);
                }
        return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
        return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
        VM_BUG_ON(!page_cgroup_locked(page));
        page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
        return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
        bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
        return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

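/*
 * The helpers below keep the per-zone LRU lists and the MEM_CGROUP_ZSTAT
 * counters in sync. Callers hold mz->lru_lock; the statistics update in
 * the charge/uncharge paths additionally requires interrupts to be
 * disabled (see the VM_BUG_ON in mem_cgroup_charge_statistics()).
 */
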
static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
                        struct page_cgroup *pc)
{
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
        else
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
        list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
                                struct page_cgroup *pc)
{
        int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

        if (!to) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                list_add(&pc->lru, &mz->inactive_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                list_add(&pc->lru, &mz->active_list);
        }
        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
        else
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

        if (active) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
                list_move(&pc->lru, &mz->active_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
                list_move(&pc->lru, &mz->inactive_list);
        }
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
        int ret;

        task_lock(task);
        ret = task->mm && mm_match_cgroup(task->mm, mem);
        task_unlock(task);
        return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

        /*
         * We cannot lock_page_cgroup while holding zone's lru_lock,
         * because other holders of lock_page_cgroup can be interrupted
         * with an attempt to rotate_reclaimable_page. But we cannot
         * safely get to page_cgroup without it, so just try_lock it:
         * mem_cgroup_isolate_pages allows for page left on wrong list.
         */
        if (!try_lock_page_cgroup(page))
                return;

        pc = page_get_page_cgroup(page);
        if (pc) {
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
                __mem_cgroup_move_lists(pc, active);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
        }
        unlock_page_cgroup(page);
}

/*
 * Calculate the mapped_ratio under the memory controller. This will be used
 * in vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
        long total, rss;

        /*
         * usage is recorded in bytes. But, here, we assume the number of
         * physical pages can be represented by "long" on any arch.
         */
        total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
        rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        return (int)((rss * 100L) / total);
}

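/*
 * Worked example: with usage at 400MB and 4KB pages (102400 pages), total
 * above is 102401, and an RSS count of 51200 pages gives
 * (51200 * 100) / 102401 = 49 with integer division, i.e. roughly half of
 * the cgroup's usage is mapped.
 */
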
/*
 * This function is called from vmscan.c. In the page reclaiming loop, the
 * balance between the active and inactive lists is calculated. For memory
 * controller page reclaiming, we should use the mem_cgroup's imbalance
 * rather than the zone's global LRU imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
        unsigned long active, inactive;
        /* active and inactive are the number of pages. 'long' is ok. */
        active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
        inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
        return (long) (active / (inactive + 1));
}

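/*
 * Example: 3000 active pages against 999 inactive pages yields
 * 3000 / (999 + 1) = 3; a result above 1 means the active list dominates.
 * The "+ 1" avoids a division by zero when the inactive list is empty.
 */
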
/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
        return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        if (priority < mem->prev_priority)
                mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
                                   struct zone *zone, int priority)
{
        long nr_active;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

        nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
        return (nr_active >> priority);
}

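/*
 * Example: with 1048576 active pages and priority == DEF_PRIORITY (12),
 * 1048576 >> 12 == 256 pages are scanned in this round; each time reclaim
 * lowers the priority, the scan window doubles.
 */
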
long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
                                        struct zone *zone, int priority)
{
        long nr_inactive;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

        nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
        return (nr_inactive >> priority);
}

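/*
 * Isolate up to nr_to_scan pages from the tail of this cgroup's per-zone
 * LRU list onto @dst on behalf of vmscan. Pages whose PageActive state
 * disagrees with the list they sit on are moved to the correct list
 * instead of being scanned, and pages not on the global LRU (e.g. under
 * migration) are skipped.
 */
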
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active)
{
        unsigned long nr_taken = 0;
        struct page *page;
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;
        int nid = z->zone_pgdat->node_id;
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;

        BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        if (active)
                src = &mz->active_list;
        else
                src = &mz->inactive_list;

        spin_lock(&mz->lru_lock);
        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
                        break;
                page = pc->page;
                if (unlikely(!PageLRU(page)))
                        continue;
                if (PageActive(page) && !active) {
                        __mem_cgroup_move_lists(pc, true);
                        continue;
                }
                if (!PageActive(page) && active) {
                        __mem_cgroup_move_lists(pc, false);
                        continue;
                }

                scan++;
                list_move(&pc->lru, &pc_list);

                if (__isolate_lru_page(page, mode) == 0) {
                        list_move(&page->lru, dst);
                        nr_taken++;
                }
        }

        list_splice(&pc_list, src);
        spin_unlock(&mz->lru_lock);

        *scanned = scan;
        return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype,
                                struct mem_cgroup *memcg)
{
        struct mem_cgroup *mem;
        struct page_cgroup *pc;
        unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_subsys.disabled)
                return 0;

        /*
         * Should page_cgroups go to their own slab?
         * One could optimize the performance of the charging routine
         * by saving a bit in the page_flags and using it as a lock
         * to see if the cgroup page already has a page_cgroup associated
         * with it.
         */
retry:
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        /* The page_cgroup exists and the page has already been accounted. */
        if (pc) {
                VM_BUG_ON(pc->page != page);
                VM_BUG_ON(pc->ref_cnt <= 0);
                pc->ref_cnt++;
                unlock_page_cgroup(page);
                goto done;
        }
        unlock_page_cgroup(page);

        pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
        if (pc == NULL)
                goto err;

        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
         * thread group leader migrates. It's possible that mm is not
         * set, if so charge the init_mm (happens for pagecache usage).
         */
        if (memcg == NULL) {
                if (!mm)
                        mm = &init_mm;

                rcu_read_lock();
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
                /* For every charge from the cgroup, increment reference count */
                css_get(&mem->css);
                rcu_read_unlock();
        } else {
                mem = memcg;
                css_get(&memcg->css);
        }

        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
                if (!(gfp_mask & __GFP_WAIT))
                        goto out;

                if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;
                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
                 * picture of reclaim. Some pages are reclaimed and might be
                 * moved to swap cache or just unmapped from the cgroup.
                 * Check the limit again to see if the reclaim reduced the
                 * current usage of the cgroup before giving up.
                 */
                if (res_counter_check_under_limit(&mem->res))
                        continue;

                if (!nr_retries--) {
                        mem_cgroup_out_of_memory(mem, gfp_mask);
                        goto out;
                }
        }

        pc->ref_cnt = 1;
        pc->mem_cgroup = mem;
        pc->page = page;
        /*
         * If a page is accounted as a page cache, insert it into the
         * inactive list. If anon, insert it into the active list.
         */
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
                pc->flags = PAGE_CGROUP_FLAG_CACHE;
        else
                pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

        lock_page_cgroup(page);
        if (page_get_page_cgroup(page)) {
                unlock_page_cgroup(page);
                /*
                 * Another charge has been added to this page already.
                 * Retrying takes lock_page_cgroup(page) again, reads
                 * page->page_cgroup and increments the refcnt, so just
                 * retrying is OK.
                 */
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kmem_cache_free(page_cgroup_cache, pc);
                goto retry;
        }
        page_assign_page_cgroup(page, pc);

        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);

        unlock_page_cgroup(page);
done:
        return 0;
out:
        css_put(&mem->css);
        kmem_cache_free(page_cgroup_cache, pc);
err:
        return -ENOMEM;
}

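/*
 * The two wrappers below are the charge entry points used by the rest of
 * the VM: mem_cgroup_charge() accounts a page as anonymous/mapped,
 * mem_cgroup_cache_charge() as page cache. The charge type also decides
 * which per-cgroup LRU list the page initially lands on (active for anon,
 * inactive for cache).
 */
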
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
{
        if (!mm)
                mm = &init_mm;
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

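/*
 * Take an extra reference on an already-charged page's page_cgroup. The
 * caller must guarantee the page has been charged; the VM_BUG_ON below
 * catches violations.
 */
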
int mem_cgroup_getref(struct page *page)
{
        struct page_cgroup *pc;

        if (mem_cgroup_subsys.disabled)
                return 0;

        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        VM_BUG_ON(!pc);
        pc->ref_cnt++;
        unlock_page_cgroup(page);
        return 0;
}

/*
 * Uncharging is always a welcome operation, we never complain, simply
 * uncharge.
 */
void mem_cgroup_uncharge_page(struct page *page)
{
        struct page_cgroup *pc;
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

        if (mem_cgroup_subsys.disabled)
                return;

        /* Check if our page_cgroup is valid. */
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (!pc)
                goto unlock;

        VM_BUG_ON(pc->page != page);
        VM_BUG_ON(pc->ref_cnt <= 0);

        if (--(pc->ref_cnt) == 0) {
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
                __mem_cgroup_remove_list(mz, pc);
                spin_unlock_irqrestore(&mz->lru_lock, flags);

                page_assign_page_cgroup(page, NULL);
                unlock_page_cgroup(page);

                mem = pc->mem_cgroup;
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);

                kmem_cache_free(page_cgroup_cache, pc);
                return;
        }

unlock:
        unlock_page_cgroup(page);
}

/* Before starting migration, account against the new page. */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
        struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
        enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
        int ret = 0;

        if (mem_cgroup_subsys.disabled)
                return 0;

        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc) {
                mem = pc->mem_cgroup;
                css_get(&mem->css);
                if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
                        ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        }
        unlock_page_cgroup(page);
        if (mem) {
                ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
                        ctype, mem);
                css_put(&mem->css);
        }
        return ret;
}

/* remove redundant charge */
void mem_cgroup_end_migration(struct page *newpage)
{
        mem_cgroup_uncharge_page(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                            struct mem_cgroup_per_zone *mz,
                            int active)
{
        struct page_cgroup *pc;
        struct page *page;
        int count = FORCE_UNCHARGE_BATCH;
        unsigned long flags;
        struct list_head *list;

        if (active)
                list = &mz->active_list;
        else
                list = &mz->inactive_list;

        spin_lock_irqsave(&mz->lru_lock, flags);
        while (!list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
                page = pc->page;
                get_page(page);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
                /* A !PageLRU page can be found if it's under migration. */
                if (PageLRU(page)) {
                        mem_cgroup_uncharge_page(page);
                        put_page(page);
                        if (--count <= 0) {
                                count = FORCE_UNCHARGE_BATCH;
                                cond_resched();
                        }
                } else
                        cond_resched();
                spin_lock_irqsave(&mz->lru_lock, flags);
        }
        spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
        int ret = -EBUSY;
        int node, zid;

        if (mem_cgroup_subsys.disabled)
                return 0;

        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
         * active_list <-> inactive_list while we don't hold a lock,
         * so we have to loop here until all the lists are empty.
         */
        while (mem->res.usage > 0) {
                if (atomic_read(&mem->css.cgroup->count) > 0)
                        goto out;
                for_each_node_state(node, N_POSSIBLE)
                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                struct mem_cgroup_per_zone *mz;
                                mz = mem_cgroup_zoneinfo(mem, node, zid);
                                /* drop all page_cgroup in active_list */
                                mem_cgroup_force_empty_list(mem, mz, 1);
                                /* drop all page_cgroup in inactive_list */
                                mem_cgroup_force_empty_list(mem, mz, 0);
                        }
        }
        ret = 0;
out:
        css_put(&mem->css);
        return ret;
}

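/*
 * The handlers below back the control files declared in
 * mem_cgroup_files[]: reads and writes go straight to the embedded
 * res_counter, and the reset trigger clears whichever of max_usage or
 * failcnt is selected by cft->private.
 */
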
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
        return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
                                    cft->private);
}

static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                            const char *buffer)
{
        return res_counter_write(&mem_cgroup_from_cont(cont)->res,
                                 cft->private, buffer,
                                 res_counter_memparse_write_strategy);
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
        struct mem_cgroup *mem;

        mem = mem_cgroup_from_cont(cont);
        switch (event) {
        case RES_MAX_USAGE:
                res_counter_reset_max(&mem->res);
                break;
        case RES_FAILCNT:
                res_counter_reset_failcnt(&mem->res);
                break;
        }
        return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
        return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
        const char *msg;
        u64 unit;
} mem_cgroup_stat_desc[] = {
        [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
        [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
        [MEM_CGROUP_STAT_PGPGIN_COUNT] = { "pgpgin", 1, },
        [MEM_CGROUP_STAT_PGPGOUT_COUNT] = { "pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
{
        struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
        struct mem_cgroup_stat *stat = &mem_cont->stat;
        int i;

        for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
                s64 val;

                val = mem_cgroup_read_stat(stat, i);
                val *= mem_cgroup_stat_desc[i].unit;
                cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
        }
        /* showing # of active pages */
        {
                unsigned long active, inactive;

                inactive = mem_cgroup_get_all_zonestat(mem_cont,
                                                MEM_CGROUP_ZSTAT_INACTIVE);
                active = mem_cgroup_get_all_zonestat(mem_cont,
                                                MEM_CGROUP_ZSTAT_ACTIVE);
                cb->fill(cb, "active", (active) * PAGE_SIZE);
                cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
        }
        return 0;
}

static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = RES_USAGE,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "max_usage_in_bytes",
                .private = RES_MAX_USAGE,
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = RES_LIMIT,
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = RES_FAILCNT,
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "force_empty",
                .trigger = mem_force_empty_write,
        },
        {
                .name = "stat",
                .read_map = mem_control_stat_show,
        },
};

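/*
 * From userspace these appear as memory.* files in each cgroup directory.
 * An illustrative shell session (the mount point is just an example):
 *
 *	# mkdir /cgroups/0
 *	# echo 4M > /cgroups/0/memory.limit_in_bytes
 *	# cat /cgroups/0/memory.usage_in_bytes
 *
 * The "4M" is parsed by res_counter_memparse_write_strategy(), which
 * accepts K/M/G suffixes.
 */
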
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
        int zone, tmp = node;
        /*
         * This routine is called against possible nodes.
         * But it's a BUG to call kmalloc() against an offline node.
         *
         * TODO: this routine can waste much memory for nodes which will
         *       never be onlined. It's better to use a memory hotplug
         *       callback function.
         */
        if (!node_state(node, N_NORMAL_MEMORY))
                tmp = -1;
        pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
        if (!pn)
                return 1;

        mem->info.nodeinfo[node] = pn;
        memset(pn, 0, sizeof(*pn));

        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                INIT_LIST_HEAD(&mz->active_list);
                INIT_LIST_HEAD(&mz->inactive_list);
                spin_lock_init(&mz->lru_lock);
        }
        return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        kfree(mem->info.nodeinfo[node]);
}

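/*
 * struct mem_cgroup carries an NR_CPUS-sized statistics array, so it can
 * be larger than a page on large configurations; fall back to vmalloc()
 * when a kmalloc() of that size would need a high-order allocation.
 */
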
static struct mem_cgroup *mem_cgroup_alloc(void)
{
        struct mem_cgroup *mem;

        if (sizeof(*mem) < PAGE_SIZE)
                mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        else
                mem = vmalloc(sizeof(*mem));

        if (mem)
                memset(mem, 0, sizeof(*mem));
        return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
        if (sizeof(*mem) < PAGE_SIZE)
                kfree(mem);
        else
                vfree(mem);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct mem_cgroup *mem;
        int node;

        if (unlikely((cont->parent) == NULL)) {
                mem = &init_mem_cgroup;
                page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
        } else {
                mem = mem_cgroup_alloc();
                if (!mem)
                        return ERR_PTR(-ENOMEM);
        }

        res_counter_init(&mem->res);

        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;

        return &mem->css;
free_out:
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
                mem_cgroup_free(mem);
        return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
        mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        int node;
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);

        mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        if (mem_cgroup_subsys.disabled)
                return 0;
        return cgroup_add_files(cont, ss, mem_cgroup_files,
                                        ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
                                struct task_struct *p)
{
        struct mm_struct *mm;
        struct mem_cgroup *mem, *old_mem;

        if (mem_cgroup_subsys.disabled)
                return;

        mm = get_task_mm(p);
        if (mm == NULL)
                return;

        mem = mem_cgroup_from_cont(cont);
        old_mem = mem_cgroup_from_cont(old_cont);

        if (mem == old_mem)
                goto out;

        /*
         * Only thread group leaders are allowed to migrate, the mm_struct is
         * in effect owned by the leader.
         */
        if (!thread_group_leader(p))
                goto out;

out:
        mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
        .name = "memory",
        .subsys_id = mem_cgroup_subsys_id,
        .create = mem_cgroup_create,
        .pre_destroy = mem_cgroup_pre_destroy,
        .destroy = mem_cgroup_destroy,
        .populate = mem_cgroup_populate,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
};