1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/slab.h>
33 #include <linux/swap.h>
34 #include <linux/spinlock.h>
35 #include <linux/fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/vmalloc.h>
38 #include <linux/mm_inline.h>
39 #include <linux/page_cgroup.h>
40 #include "internal.h"
41
42 #include <asm/uaccess.h>
43
44 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45 #define MEM_CGROUP_RECLAIM_RETRIES      5
46
47 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
48 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
49 int do_swap_account __read_mostly;
50 static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
51 #else
52 #define do_swap_account         (0)
53 #endif
54
55 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
56
57 /*
58  * Statistics for memory cgroup.
59  */
60 enum mem_cgroup_stat_index {
61         /*
62          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
63          */
64         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
65         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
66         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
67         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
68
69         MEM_CGROUP_STAT_NSTATS,
70 };
71
72 struct mem_cgroup_stat_cpu {
73         s64 count[MEM_CGROUP_STAT_NSTATS];
74 } ____cacheline_aligned_in_smp;
75
76 struct mem_cgroup_stat {
77         struct mem_cgroup_stat_cpu cpustat[0];
78 };
79
80 /*
81  * For accounting with irqs disabled, there is no need to increment the preempt count.
82  */
83 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
84                 enum mem_cgroup_stat_index idx, int val)
85 {
86         stat->count[idx] += val;
87 }
88
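/* Sum one statistics counter over all possible CPUs. */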
89 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
90                 enum mem_cgroup_stat_index idx)
91 {
92         int cpu;
93         s64 ret = 0;
94         for_each_possible_cpu(cpu)
95                 ret += stat->cpustat[cpu].count[idx];
96         return ret;
97 }
98
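/* Local usage of this memcg in pages: charged page cache plus rss, not including children. */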
99 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
100 {
101         s64 ret;
102
103         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
104         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
105         return ret;
106 }
107
108 /*
109  * per-zone information in memory controller.
110  */
111 struct mem_cgroup_per_zone {
112         /*
113          * spin_lock to protect the per cgroup LRU
114          */
115         struct list_head        lists[NR_LRU_LISTS];
116         unsigned long           count[NR_LRU_LISTS];
117
118         struct zone_reclaim_stat reclaim_stat;
119 };
120 /* Macro for accessing counter */
121 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
122
123 struct mem_cgroup_per_node {
124         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
125 };
126
127 struct mem_cgroup_lru_info {
128         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
129 };
130
131 /*
132  * The memory controller data structure. The memory controller controls both
133  * page cache and RSS per cgroup. We would eventually like to provide
134  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
135  * to help the administrator determine what knobs to tune.
136  *
137  * TODO: Add a water mark for the memory controller. Reclaim will begin when
138  * we hit the water mark. Maybe even add a low water mark, such that
139  * no reclaim occurs from a cgroup at its low water mark; this is
140  * a feature that will be implemented much later in the future.
141  */
142 struct mem_cgroup {
143         struct cgroup_subsys_state css;
144         /*
145          * the counter to account for memory usage
146          */
147         struct res_counter res;
148         /*
149          * the counter to account for mem+swap usage.
150          */
151         struct res_counter memsw;
152         /*
153          * Per cgroup active and inactive list, similar to the
154          * per zone LRU lists.
155          */
156         struct mem_cgroup_lru_info info;
157
158         /*
159          * protects reclaim-related members
160          */
161         spinlock_t reclaim_param_lock;
162
163         int     prev_priority;  /* for recording reclaim priority */
164
165         /*
166          * While reclaiming in a hierarchy, we cache the last child we
167          * reclaimed from.
168          */
169         int last_scanned_child;
170         /*
171          * Should the accounting and control be hierarchical, per subtree?
172          */
173         bool use_hierarchy;
174         unsigned long   last_oom_jiffies;
175         atomic_t        refcnt;
176
177         unsigned int    swappiness;
178
179         /*
180          * statistics. This must be placed at the end of memcg.
181          */
182         struct mem_cgroup_stat stat;
183 };
184
185 enum charge_type {
186         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
187         MEM_CGROUP_CHARGE_TYPE_MAPPED,
188         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
189         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
190         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
191         NR_CHARGE_TYPE,
192 };
193
194 /* only for here (for easy reading.) */
195 #define PCGF_CACHE      (1UL << PCG_CACHE)
196 #define PCGF_USED       (1UL << PCG_USED)
197 #define PCGF_LOCK       (1UL << PCG_LOCK)
198 static const unsigned long
199 pcg_default_flags[NR_CHARGE_TYPE] = {
200         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
201         PCGF_USED | PCGF_LOCK, /* Anon */
202         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
203         0, /* FORCE */
204 };
205
206 /* for encoding cft->private value on file */
207 #define _MEM                    (0)
208 #define _MEMSWAP                (1)
209 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
210 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
211 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
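/*
 * Illustrative use only: a mem+swap limit file could set
 * .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), and its handler then
 * recovers the counter type with MEMFILE_TYPE() and the resource attribute
 * with MEMFILE_ATTR().
 */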
212
213 static void mem_cgroup_get(struct mem_cgroup *mem);
214 static void mem_cgroup_put(struct mem_cgroup *mem);
215 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
216
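/*
 * Update this memcg's per-cpu counters for a charge (charge == true) or an
 * uncharge: CACHE or RSS moves by one page depending on PageCgroupCache(pc),
 * and a PGPGIN or PGPGOUT event is counted.
 */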
217 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
218                                          struct page_cgroup *pc,
219                                          bool charge)
220 {
221         int val = (charge)? 1 : -1;
222         struct mem_cgroup_stat *stat = &mem->stat;
223         struct mem_cgroup_stat_cpu *cpustat;
224         int cpu = get_cpu();
225
226         cpustat = &stat->cpustat[cpu];
227         if (PageCgroupCache(pc))
228                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
229         else
230                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
231
232         if (charge)
233                 __mem_cgroup_stat_add_safe(cpustat,
234                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
235         else
236                 __mem_cgroup_stat_add_safe(cpustat,
237                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
238         put_cpu();
239 }
240
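/* Look up the per-node, per-zone LRU accounting structure for a memcg. */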
241 static struct mem_cgroup_per_zone *
242 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
243 {
244         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
245 }
246
247 static struct mem_cgroup_per_zone *
248 page_cgroup_zoneinfo(struct page_cgroup *pc)
249 {
250         struct mem_cgroup *mem = pc->mem_cgroup;
251         int nid = page_cgroup_nid(pc);
252         int zid = page_cgroup_zid(pc);
253
254         if (!mem)
255                 return NULL;
256
257         return mem_cgroup_zoneinfo(mem, nid, zid);
258 }
259
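/*
 * Sum the per-zone count for @idx over all online nodes and their zones,
 * i.e. the total length of this LRU list for the memcg (local stats only).
 */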
260 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
261                                         enum lru_list idx)
262 {
263         int nid, zid;
264         struct mem_cgroup_per_zone *mz;
265         u64 total = 0;
266
267         for_each_online_node(nid)
268                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
269                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
270                         total += MEM_CGROUP_ZSTAT(mz, idx);
271                 }
272         return total;
273 }
274
275 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
276 {
277         return container_of(cgroup_subsys_state(cont,
278                                 mem_cgroup_subsys_id), struct mem_cgroup,
279                                 css);
280 }
281
282 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
283 {
284         /*
285          * mm_update_next_owner() may clear mm->owner to NULL
286          * if it races with swapoff, page migration, etc.
287          * So this can be called with p == NULL.
288          */
289         if (unlikely(!p))
290                 return NULL;
291
292         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
293                                 struct mem_cgroup, css);
294 }
295
296 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
297 {
298         struct mem_cgroup *mem = NULL;
299
300         if (!mm)
301                 return NULL;
302         /*
303          * Because we have no locks, mm->owner may be moved to another cgroup
304          * concurrently. We use css_tryget() here, even though it looks
305          * pessimistic, rather than adding locks here.
306          */
307         rcu_read_lock();
308         do {
309                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
310                 if (unlikely(!mem))
311                         break;
312         } while (!css_tryget(&mem->css));
313         rcu_read_unlock();
314         return mem;
315 }
316
317 static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
318 {
319         if (!mem)
320                 return true;
321         return css_is_removed(&mem->css);
322 }
323
324
325 /*
326  * Call the callback function against all cgroups under the hierarchy tree.
327  */
328 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
329                           int (*func)(struct mem_cgroup *, void *))
330 {
331         int found, ret, nextid;
332         struct cgroup_subsys_state *css;
333         struct mem_cgroup *mem;
334
335         if (!root->use_hierarchy)
336                 return (*func)(root, data);
337
338         nextid = 1;
339         do {
340                 ret = 0;
341                 mem = NULL;
342
343                 rcu_read_lock();
344                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
345                                    &found);
346                 if (css && css_tryget(css))
347                         mem = container_of(css, struct mem_cgroup, css);
348                 rcu_read_unlock();
349
350                 if (mem) {
351                         ret = (*func)(mem, data);
352                         css_put(&mem->css);
353                 }
354                 nextid = found + 1;
355         } while (!ret && css);
356
357         return ret;
358 }
359
360 /*
361  * Following LRU functions are allowed to be used without PCG_LOCK.
362  * Operations are called by the global LRU routines independently of memcg.
363  * What we have to take care of here is the validity of pc->mem_cgroup.
364  *
365  * Changes to pc->mem_cgroup happen when
366  * 1. charge
367  * 2. moving account
368  * In the typical case, "charge" is done before add-to-lru. The exception is
369  * SwapCache, which is added to the LRU before being charged.
370  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
371  * When moving account, the page is not on LRU. It's isolated.
372  */
373
374 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
375 {
376         struct page_cgroup *pc;
377         struct mem_cgroup *mem;
378         struct mem_cgroup_per_zone *mz;
379
380         if (mem_cgroup_disabled())
381                 return;
382         pc = lookup_page_cgroup(page);
383         /* can happen while we handle swapcache. */
384         if (list_empty(&pc->lru) || !pc->mem_cgroup)
385                 return;
386         /*
387          * We don't check PCG_USED bit. It's cleared when the "page" is finally
388          * removed from global LRU.
389          */
390         mz = page_cgroup_zoneinfo(pc);
391         mem = pc->mem_cgroup;
392         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
393         list_del_init(&pc->lru);
394         return;
395 }
396
397 void mem_cgroup_del_lru(struct page *page)
398 {
399         mem_cgroup_del_lru_list(page, page_lru(page));
400 }
401
402 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
403 {
404         struct mem_cgroup_per_zone *mz;
405         struct page_cgroup *pc;
406
407         if (mem_cgroup_disabled())
408                 return;
409
410         pc = lookup_page_cgroup(page);
411         /*
412          * Used bit is set without atomic ops but after smp_wmb().
413          * For making pc->mem_cgroup visible, insert smp_rmb() here.
414          */
415         smp_rmb();
416         /* unused page is not rotated. */
417         if (!PageCgroupUsed(pc))
418                 return;
419         mz = page_cgroup_zoneinfo(pc);
420         list_move(&pc->lru, &mz->lists[lru]);
421 }
422
423 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
424 {
425         struct page_cgroup *pc;
426         struct mem_cgroup_per_zone *mz;
427
428         if (mem_cgroup_disabled())
429                 return;
430         pc = lookup_page_cgroup(page);
431         /*
432          * Used bit is set without atomic ops but after smp_wmb().
433          * For making pc->mem_cgroup visible, insert smp_rmb() here.
434          */
435         smp_rmb();
436         if (!PageCgroupUsed(pc))
437                 return;
438
439         mz = page_cgroup_zoneinfo(pc);
440         MEM_CGROUP_ZSTAT(mz, lru) += 1;
441         list_add(&pc->lru, &mz->lists[lru]);
442 }
443
444 /*
445  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
446  * the LRU because the page may be reused after it's fully uncharged (because of
447  * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when
448  * charging it again. This function is only used to charge SwapCache. It's done
449  * under lock_page() and it's expected that zone->lru_lock is not held.
450  */
451 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
452 {
453         unsigned long flags;
454         struct zone *zone = page_zone(page);
455         struct page_cgroup *pc = lookup_page_cgroup(page);
456
457         spin_lock_irqsave(&zone->lru_lock, flags);
458         /*
459          * Forget old LRU when this page_cgroup is *not* used. This Used bit
460          * is guarded by lock_page() because the page is SwapCache.
461          */
462         if (!PageCgroupUsed(pc))
463                 mem_cgroup_del_lru_list(page, page_lru(page));
464         spin_unlock_irqrestore(&zone->lru_lock, flags);
465 }
466
467 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
468 {
469         unsigned long flags;
470         struct zone *zone = page_zone(page);
471         struct page_cgroup *pc = lookup_page_cgroup(page);
472
473         spin_lock_irqsave(&zone->lru_lock, flags);
474         /* link when the page is linked to LRU but page_cgroup isn't */
475         if (PageLRU(page) && list_empty(&pc->lru))
476                 mem_cgroup_add_lru_list(page, page_lru(page));
477         spin_unlock_irqrestore(&zone->lru_lock, flags);
478 }
479
480
481 void mem_cgroup_move_lists(struct page *page,
482                            enum lru_list from, enum lru_list to)
483 {
484         if (mem_cgroup_disabled())
485                 return;
486         mem_cgroup_del_lru_list(page, from);
487         mem_cgroup_add_lru_list(page, to);
488 }
489
490 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
491 {
492         int ret;
493         struct mem_cgroup *curr = NULL;
494
495         task_lock(task);
496         rcu_read_lock();
497         curr = try_get_mem_cgroup_from_mm(task->mm);
498         rcu_read_unlock();
499         task_unlock(task);
500         if (!curr)
501                 return 0;
502         if (curr->use_hierarchy)
503                 ret = css_is_ancestor(&curr->css, &mem->css);
504         else
505                 ret = (curr == mem);
506         css_put(&curr->css);
507         return ret;
508 }
509
510 /*
511  * prev_priority control... this is used in the memory reclaim path.
512  */
513 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
514 {
515         int prev_priority;
516
517         spin_lock(&mem->reclaim_param_lock);
518         prev_priority = mem->prev_priority;
519         spin_unlock(&mem->reclaim_param_lock);
520
521         return prev_priority;
522 }
523
524 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
525 {
526         spin_lock(&mem->reclaim_param_lock);
527         if (priority < mem->prev_priority)
528                 mem->prev_priority = priority;
529         spin_unlock(&mem->reclaim_param_lock);
530 }
531
532 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
533 {
534         spin_lock(&mem->reclaim_param_lock);
535         mem->prev_priority = priority;
536         spin_unlock(&mem->reclaim_param_lock);
537 }
538
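/*
 * Compute the inactive:active anon ratio used by
 * mem_cgroup_inactive_anon_is_low(): sqrt(10 * size-in-GB) of the anon pages,
 * with a minimum of 1. Optionally reports the current inactive/active counts
 * through @present_pages.
 */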
539 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
540 {
541         unsigned long active;
542         unsigned long inactive;
543         unsigned long gb;
544         unsigned long inactive_ratio;
545
546         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
547         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
548
549         gb = (inactive + active) >> (30 - PAGE_SHIFT);
550         if (gb)
551                 inactive_ratio = int_sqrt(10 * gb);
552         else
553                 inactive_ratio = 1;
554
555         if (present_pages) {
556                 present_pages[0] = inactive;
557                 present_pages[1] = active;
558         }
559
560         return inactive_ratio;
561 }
562
563 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
564 {
565         unsigned long active;
566         unsigned long inactive;
567         unsigned long present_pages[2];
568         unsigned long inactive_ratio;
569
570         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
571
572         inactive = present_pages[0];
573         active = present_pages[1];
574
575         if (inactive * inactive_ratio < active)
576                 return 1;
577
578         return 0;
579 }
580
581 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
582                                        struct zone *zone,
583                                        enum lru_list lru)
584 {
585         int nid = zone->zone_pgdat->node_id;
586         int zid = zone_idx(zone);
587         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
588
589         return MEM_CGROUP_ZSTAT(mz, lru);
590 }
591
592 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
593                                                       struct zone *zone)
594 {
595         int nid = zone->zone_pgdat->node_id;
596         int zid = zone_idx(zone);
597         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
598
599         return &mz->reclaim_stat;
600 }
601
602 struct zone_reclaim_stat *
603 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
604 {
605         struct page_cgroup *pc;
606         struct mem_cgroup_per_zone *mz;
607
608         if (mem_cgroup_disabled())
609                 return NULL;
610
611         pc = lookup_page_cgroup(page);
612         /*
613          * Used bit is set without atomic ops but after smp_wmb().
614          * For making pc->mem_cgroup visible, insert smp_rmb() here.
615          */
616         smp_rmb();
617         if (!PageCgroupUsed(pc))
618                 return NULL;
619
620         mz = page_cgroup_zoneinfo(pc);
621         if (!mz)
622                 return NULL;
623
624         return &mz->reclaim_stat;
625 }
626
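/*
 * Scan up to @nr_to_scan pages from this memcg's per-zone LRU list and move
 * the pages accepted by __isolate_lru_page() onto @dst. Returns the number of
 * pages taken and reports the number scanned via @scanned.
 */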
627 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
628                                         struct list_head *dst,
629                                         unsigned long *scanned, int order,
630                                         int mode, struct zone *z,
631                                         struct mem_cgroup *mem_cont,
632                                         int active, int file)
633 {
634         unsigned long nr_taken = 0;
635         struct page *page;
636         unsigned long scan;
637         LIST_HEAD(pc_list);
638         struct list_head *src;
639         struct page_cgroup *pc, *tmp;
640         int nid = z->zone_pgdat->node_id;
641         int zid = zone_idx(z);
642         struct mem_cgroup_per_zone *mz;
643         int lru = LRU_FILE * !!file + !!active;
644
645         BUG_ON(!mem_cont);
646         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
647         src = &mz->lists[lru];
648
649         scan = 0;
650         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
651                 if (scan >= nr_to_scan)
652                         break;
653
654                 page = pc->page;
655                 if (unlikely(!PageCgroupUsed(pc)))
656                         continue;
657                 if (unlikely(!PageLRU(page)))
658                         continue;
659
660                 scan++;
661                 if (__isolate_lru_page(page, mode, file) == 0) {
662                         list_move(&page->lru, dst);
663                         nr_taken++;
664                 }
665         }
666
667         *scanned = scan;
668         return nr_taken;
669 }
670
671 #define mem_cgroup_from_res_counter(counter, member)    \
672         container_of(counter, struct mem_cgroup, member)
673
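/*
 * True when usage is below the limit; with swap accounting enabled, both the
 * memory and the mem+swap counters must be under their limits.
 */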
674 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
675 {
676         if (do_swap_account) {
677                 if (res_counter_check_under_limit(&mem->res) &&
678                         res_counter_check_under_limit(&mem->memsw))
679                         return true;
680         } else
681                 if (res_counter_check_under_limit(&mem->res))
682                         return true;
683         return false;
684 }
685
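/* Swappiness used for this memcg's reclaim; the root cgroup falls back to vm_swappiness. */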
686 static unsigned int get_swappiness(struct mem_cgroup *memcg)
687 {
688         struct cgroup *cgrp = memcg->css.cgroup;
689         unsigned int swappiness;
690
691         /* root ? */
692         if (cgrp->parent == NULL)
693                 return vm_swappiness;
694
695         spin_lock(&memcg->reclaim_param_lock);
696         swappiness = memcg->swappiness;
697         spin_unlock(&memcg->reclaim_param_lock);
698
699         return swappiness;
700 }
701
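/* Callback for mem_cgroup_walk_tree(): increments the counter for every memcg visited. */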
702 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
703 {
704         int *val = data;
705         (*val)++;
706         return 0;
707 }
708
709 /**
710  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
711  * @memcg: The memory cgroup that went over limit
712  * @p: Task that is going to be killed
713  *
714  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
715  * enabled
716  */
717 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
718 {
719         struct cgroup *task_cgrp;
720         struct cgroup *mem_cgrp;
721         /*
722          * Need a buffer in BSS, can't rely on allocations. The code relies
723          * on the assumption that OOM is serialized for memory controller.
724          * If this assumption is broken, revisit this code.
725          */
726         static char memcg_name[PATH_MAX];
727         int ret;
728
729         if (!memcg)
730                 return;
731
732
733         rcu_read_lock();
734
735         mem_cgrp = memcg->css.cgroup;
736         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
737
738         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
739         if (ret < 0) {
740                 /*
741                  * Unfortunately, we are unable to convert to a useful name,
742                  * but we'll still print out the usage information.
743                  */
744                 rcu_read_unlock();
745                 goto done;
746         }
747         rcu_read_unlock();
748
749         printk(KERN_INFO "Task in %s killed", memcg_name);
750
751         rcu_read_lock();
752         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
753         if (ret < 0) {
754                 rcu_read_unlock();
755                 goto done;
756         }
757         rcu_read_unlock();
758
759         /*
760          * Continues from above, so we don't need a KERN_ level
761          */
762         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
763 done:
764
765         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
766                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
767                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
768                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
769         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
770                 "failcnt %llu\n",
771                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
772                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
773                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
774 }
775
776 /*
777  * This function returns the number of memcgs under the hierarchy tree. Returns
778  * 1 (the count of self) if there are no children.
779  */
780 static int mem_cgroup_count_children(struct mem_cgroup *mem)
781 {
782         int num = 0;
783         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
784         return num;
785 }
786
787 /*
788  * Visit the first child (need not be the first child as per the ordering
789  * of the cgroup list, since we track last_scanned_child) of @mem and use
790  * that to reclaim free pages from.
791  */
792 static struct mem_cgroup *
793 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
794 {
795         struct mem_cgroup *ret = NULL;
796         struct cgroup_subsys_state *css;
797         int nextid, found;
798
799         if (!root_mem->use_hierarchy) {
800                 css_get(&root_mem->css);
801                 ret = root_mem;
802         }
803
804         while (!ret) {
805                 rcu_read_lock();
806                 nextid = root_mem->last_scanned_child + 1;
807                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
808                                    &found);
809                 if (css && css_tryget(css))
810                         ret = container_of(css, struct mem_cgroup, css);
811
812                 rcu_read_unlock();
813                 /* Updates scanning parameter */
814                 spin_lock(&root_mem->reclaim_param_lock);
815                 if (!css) {
816                         /* this means start scan from ID:1 */
817                         root_mem->last_scanned_child = 0;
818                 } else
819                         root_mem->last_scanned_child = found;
820                 spin_unlock(&root_mem->reclaim_param_lock);
821         }
822
823         return ret;
824 }
825
826 /*
827  * Scan the hierarchy if needed to reclaim memory. We remember the last child
828  * we reclaimed from, so that we don't end up penalizing one child extensively
829  * based on its position in the children list.
830  *
831  * root_mem is the original ancestor that we've been reclaiming from.
832  *
833  * We give up and return to the caller when we visit root_mem twice.
834  * (other groups can be removed while we're walking....)
835  *
836  * If shrink==true, this returns immediately to avoid freeing too much.
837  */
838 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
839                                    gfp_t gfp_mask, bool noswap, bool shrink)
840 {
841         struct mem_cgroup *victim;
842         int ret, total = 0;
843         int loop = 0;
844
845         while (loop < 2) {
846                 victim = mem_cgroup_select_victim(root_mem);
847                 if (victim == root_mem)
848                         loop++;
849                 if (!mem_cgroup_local_usage(&victim->stat)) {
850                         /* this cgroup's local usage == 0 */
851                         css_put(&victim->css);
852                         continue;
853                 }
854                 /* we use swappiness of local cgroup */
855                 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
856                                                    get_swappiness(victim));
857                 css_put(&victim->css);
858                 /*
859                  * When shrinking usage, we can't check whether we should stop
860                  * here or reclaim more; that depends on the callers.
861                  * last_scanned_child works well enough to keep fairness under the tree.
862                  */
863                 if (shrink)
864                         return ret;
865                 total += ret;
866                 if (mem_cgroup_check_under_limit(root_mem))
867                         return 1 + total;
868         }
869         return total;
870 }
871
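/*
 * Returns true if the memcg owning this task's mm recorded an OOM kill within
 * the last HZ/10 jiffies.
 */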
872 bool mem_cgroup_oom_called(struct task_struct *task)
873 {
874         bool ret = false;
875         struct mem_cgroup *mem;
876         struct mm_struct *mm;
877
878         rcu_read_lock();
879         mm = task->mm;
880         if (!mm)
881                 mm = &init_mm;
882         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
883         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
884                 ret = true;
885         rcu_read_unlock();
886         return ret;
887 }
888
889 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
890 {
891         mem->last_oom_jiffies = jiffies;
892         return 0;
893 }
894
895 static void record_last_oom(struct mem_cgroup *mem)
896 {
897         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
898 }
899
900
901 /*
902  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
903  * the OOM killer can be invoked.
904  */
905 static int __mem_cgroup_try_charge(struct mm_struct *mm,
906                         gfp_t gfp_mask, struct mem_cgroup **memcg,
907                         bool oom)
908 {
909         struct mem_cgroup *mem, *mem_over_limit;
910         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
911         struct res_counter *fail_res;
912
913         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
914                 /* Don't account this! */
915                 *memcg = NULL;
916                 return 0;
917         }
918
919         /*
920          * We always charge the cgroup the mm_struct belongs to.
921          * The mm_struct's mem_cgroup changes on task migration if the
922          * thread group leader migrates. It's possible that mm is not
923          * set, if so charge the init_mm (happens for pagecache usage).
924          */
925         mem = *memcg;
926         if (likely(!mem)) {
927                 mem = try_get_mem_cgroup_from_mm(mm);
928                 *memcg = mem;
929         } else {
930                 css_get(&mem->css);
931         }
932         if (unlikely(!mem))
933                 return 0;
934
935         VM_BUG_ON(mem_cgroup_is_obsolete(mem));
936
937         while (1) {
938                 int ret;
939                 bool noswap = false;
940
941                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
942                 if (likely(!ret)) {
943                         if (!do_swap_account)
944                                 break;
945                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
946                                                         &fail_res);
947                         if (likely(!ret))
948                                 break;
949                         /* mem+swap counter fails */
950                         res_counter_uncharge(&mem->res, PAGE_SIZE);
951                         noswap = true;
952                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
953                                                                         memsw);
954                 } else
955                         /* mem counter fails */
956                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
957                                                                         res);
958
959                 if (!(gfp_mask & __GFP_WAIT))
960                         goto nomem;
961
962                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
963                                                         noswap, false);
964                 if (ret)
965                         continue;
966
967                 /*
968                  * try_to_free_mem_cgroup_pages() might not give us a full
969                  * picture of reclaim. Some pages are reclaimed and might be
970                  * moved to swap cache or just unmapped from the cgroup.
971                  * Check the limit again to see if the reclaim reduced the
972                  * current usage of the cgroup before giving up
973                  *
974                  */
975                 if (mem_cgroup_check_under_limit(mem_over_limit))
976                         continue;
977
978                 if (!nr_retries--) {
979                         if (oom) {
980                                 mutex_lock(&memcg_tasklist);
981                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
982                                 mutex_unlock(&memcg_tasklist);
983                                 record_last_oom(mem_over_limit);
984                         }
985                         goto nomem;
986                 }
987         }
988         return 0;
989 nomem:
990         css_put(&mem->css);
991         return -ENOMEM;
992 }
993
994
995 /*
996  * A helper function to get a mem_cgroup from an ID. Must be called under
997  * rcu_read_lock(). The caller must check css_is_removed() or similar if
998  * that is a concern. (Dropping a refcnt from swap can be called against a
999  * removed memcg.)
1000  */
1001 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1002 {
1003         struct cgroup_subsys_state *css;
1004
1005         /* ID 0 is unused ID */
1006         if (!id)
1007                 return NULL;
1008         css = css_lookup(&mem_cgroup_subsys, id);
1009         if (!css)
1010                 return NULL;
1011         return container_of(css, struct mem_cgroup, css);
1012 }
1013
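/*
 * Find the memcg associated with a swapcache page: from pc->mem_cgroup when
 * the page_cgroup is in use, otherwise from the swap_cgroup record for the
 * swap entry. A css reference is taken with css_tryget(); returns NULL on
 * failure.
 */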
1014 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1015 {
1016         struct mem_cgroup *mem;
1017         struct page_cgroup *pc;
1018         unsigned short id;
1019         swp_entry_t ent;
1020
1021         VM_BUG_ON(!PageLocked(page));
1022
1023         if (!PageSwapCache(page))
1024                 return NULL;
1025
1026         pc = lookup_page_cgroup(page);
1027         /*
1028          * Used bit of swapcache is solid under page lock.
1029          */
1030         if (PageCgroupUsed(pc)) {
1031                 mem = pc->mem_cgroup;
1032                 if (mem && !css_tryget(&mem->css))
1033                         mem = NULL;
1034         } else {
1035                 ent.val = page_private(page);
1036                 id = lookup_swap_cgroup(ent);
1037                 rcu_read_lock();
1038                 mem = mem_cgroup_lookup(id);
1039                 if (mem && !css_tryget(&mem->css))
1040                         mem = NULL;
1041                 rcu_read_unlock();
1042         }
1043         return mem;
1044 }
1045
1046 /*
1047  * Commit a charge obtained by __mem_cgroup_try_charge() and set the page_cgroup
1048  * to the USED state. If it is already USED, uncharge and return.
1049  */
1050
1051 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1052                                      struct page_cgroup *pc,
1053                                      enum charge_type ctype)
1054 {
1055         /* try_charge() can return NULL to *memcg, taking care of it. */
1056         if (!mem)
1057                 return;
1058
1059         lock_page_cgroup(pc);
1060         if (unlikely(PageCgroupUsed(pc))) {
1061                 unlock_page_cgroup(pc);
1062                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1063                 if (do_swap_account)
1064                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1065                 css_put(&mem->css);
1066                 return;
1067         }
1068         pc->mem_cgroup = mem;
1069         smp_wmb();
1070         pc->flags = pcg_default_flags[ctype];
1071
1072         mem_cgroup_charge_statistics(mem, pc, true);
1073
1074         unlock_page_cgroup(pc);
1075 }
1076
1077 /**
1078  * mem_cgroup_move_account - move account of the page
1079  * @pc: page_cgroup of the page.
1080  * @from: mem_cgroup which the page is moved from.
1081  * @to: mem_cgroup which the page is moved to. @from != @to.
1082  *
1083  * The caller must confirm the following:
1084  * - the page is not on the LRU (isolate_page() is useful.)
1085  *
1086  * Returns 0 on success,
1087  * returns -EBUSY when the lock is busy or "pc" is unstable.
1088  *
1089  * This function does "uncharge" from old cgroup but doesn't do "charge" to
1090  * new cgroup. It should be done by a caller.
1091  */
1092
1093 static int mem_cgroup_move_account(struct page_cgroup *pc,
1094         struct mem_cgroup *from, struct mem_cgroup *to)
1095 {
1096         struct mem_cgroup_per_zone *from_mz, *to_mz;
1097         int nid, zid;
1098         int ret = -EBUSY;
1099
1100         VM_BUG_ON(from == to);
1101         VM_BUG_ON(PageLRU(pc->page));
1102
1103         nid = page_cgroup_nid(pc);
1104         zid = page_cgroup_zid(pc);
1105         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1106         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1107
1108         if (!trylock_page_cgroup(pc))
1109                 return ret;
1110
1111         if (!PageCgroupUsed(pc))
1112                 goto out;
1113
1114         if (pc->mem_cgroup != from)
1115                 goto out;
1116
1117         res_counter_uncharge(&from->res, PAGE_SIZE);
1118         mem_cgroup_charge_statistics(from, pc, false);
1119         if (do_swap_account)
1120                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1121         css_put(&from->css);
1122
1123         css_get(&to->css);
1124         pc->mem_cgroup = to;
1125         mem_cgroup_charge_statistics(to, pc, true);
1126         ret = 0;
1127 out:
1128         unlock_page_cgroup(pc);
1129         return ret;
1130 }
1131
1132 /*
1133  * move charges to its parent.
1134  */
1135
1136 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1137                                   struct mem_cgroup *child,
1138                                   gfp_t gfp_mask)
1139 {
1140         struct page *page = pc->page;
1141         struct cgroup *cg = child->css.cgroup;
1142         struct cgroup *pcg = cg->parent;
1143         struct mem_cgroup *parent;
1144         int ret;
1145
1146         /* Is ROOT ? */
1147         if (!pcg)
1148                 return -EINVAL;
1149
1150
1151         parent = mem_cgroup_from_cont(pcg);
1152
1153
1154         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1155         if (ret || !parent)
1156                 return ret;
1157
1158         if (!get_page_unless_zero(page)) {
1159                 ret = -EBUSY;
1160                 goto uncharge;
1161         }
1162
1163         ret = isolate_lru_page(page);
1164
1165         if (ret)
1166                 goto cancel;
1167
1168         ret = mem_cgroup_move_account(pc, child, parent);
1169
1170         putback_lru_page(page);
1171         if (!ret) {
1172                 put_page(page);
1173                 /* drop extra refcnt by try_charge() */
1174                 css_put(&parent->css);
1175                 return 0;
1176         }
1177
1178 cancel:
1179         put_page(page);
1180 uncharge:
1181         /* drop extra refcnt by try_charge() */
1182         css_put(&parent->css);
1183         /* uncharge if move fails */
1184         res_counter_uncharge(&parent->res, PAGE_SIZE);
1185         if (do_swap_account)
1186                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1187         return ret;
1188 }
1189
1190 /*
1191  * Charge the memory controller for page usage.
1192  * Return
1193  * 0 if the charge was successful
1194  * < 0 if the cgroup is over its limit
1195  */
1196 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1197                                 gfp_t gfp_mask, enum charge_type ctype,
1198                                 struct mem_cgroup *memcg)
1199 {
1200         struct mem_cgroup *mem;
1201         struct page_cgroup *pc;
1202         int ret;
1203
1204         pc = lookup_page_cgroup(page);
1205         /* can happen at boot */
1206         if (unlikely(!pc))
1207                 return 0;
1208         prefetchw(pc);
1209
1210         mem = memcg;
1211         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1212         if (ret || !mem)
1213                 return ret;
1214
1215         __mem_cgroup_commit_charge(mem, pc, ctype);
1216         return 0;
1217 }
1218
1219 int mem_cgroup_newpage_charge(struct page *page,
1220                               struct mm_struct *mm, gfp_t gfp_mask)
1221 {
1222         if (mem_cgroup_disabled())
1223                 return 0;
1224         if (PageCompound(page))
1225                 return 0;
1226         /*
1227          * If already mapped, we don't have to account.
1228          * If this is page cache, page->mapping has an address_space.
1229          * But page->mapping may hold a stale anon_vma pointer;
1230          * detect that with a PageAnon() check. A newly-mapped anon page's
1231          * page->mapping is NULL.
1232          */
1233         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1234                 return 0;
1235         if (unlikely(!mm))
1236                 mm = &init_mm;
1237         return mem_cgroup_charge_common(page, mm, gfp_mask,
1238                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1239 }
1240
1241 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1242                                 gfp_t gfp_mask)
1243 {
1244         struct mem_cgroup *mem = NULL;
1245         int ret;
1246
1247         if (mem_cgroup_disabled())
1248                 return 0;
1249         if (PageCompound(page))
1250                 return 0;
1251         /*
1252          * Corner case handling. This is usually called from add_to_page_cache().
1253          * But some filesystems (shmem) precharge the page before calling it
1254          * and then call add_to_page_cache() with GFP_NOWAIT.
1255          *
1256          * For the GFP_NOWAIT case, the page may be pre-charged before calling
1257          * add_to_page_cache(). (See shmem.c.) Check for that here to avoid
1258          * charging twice. (It works, but at a slightly larger cost.)
1259          * And when the page is SwapCache, swap information should be taken
1260          * into account. This is under lock_page() now.
1261          */
1262         if (!(gfp_mask & __GFP_WAIT)) {
1263                 struct page_cgroup *pc;
1264
1265
1266                 pc = lookup_page_cgroup(page);
1267                 if (!pc)
1268                         return 0;
1269                 lock_page_cgroup(pc);
1270                 if (PageCgroupUsed(pc)) {
1271                         unlock_page_cgroup(pc);
1272                         return 0;
1273                 }
1274                 unlock_page_cgroup(pc);
1275         }
1276
1277         if (do_swap_account && PageSwapCache(page)) {
1278                 mem = try_get_mem_cgroup_from_swapcache(page);
1279                 if (mem)
1280                         mm = NULL;
1281                 else
1282                         mem = NULL;
1283                 /* SwapCache may be still linked to LRU now. */
1284                 mem_cgroup_lru_del_before_commit_swapcache(page);
1285         }
1286
1287         if (unlikely(!mm && !mem))
1288                 mm = &init_mm;
1289
1290         if (page_is_file_cache(page))
1291                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1292                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1293
1294         ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1295                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1296         if (mem)
1297                 css_put(&mem->css);
1298         if (PageSwapCache(page))
1299                 mem_cgroup_lru_add_after_commit_swapcache(page);
1300
1301         if (do_swap_account && !ret && PageSwapCache(page)) {
1302                 swp_entry_t ent = {.val = page_private(page)};
1303                 unsigned short id;
1304                 /* avoid double counting */
1305                 id = swap_cgroup_record(ent, 0);
1306                 rcu_read_lock();
1307                 mem = mem_cgroup_lookup(id);
1308                 if (mem) {
1309                         /*
1310                          * We did swap-in. Then, this entry is doubly counted
1311                          * both in mem and memsw. We uncharge it, here.
1312                          * Recorded ID can be obsolete. We avoid calling
1313                          * css_tryget()
1314                          */
1315                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1316                         mem_cgroup_put(mem);
1317                 }
1318                 rcu_read_unlock();
1319         }
1320         return ret;
1321 }
1322
1323 /*
1324  * During swap-in, the sequence is try_charge -> commit or cancel, with the
1325  * page locked. When try_charge() returns successfully, one refcnt to the memcg
1326  * (without a struct page_cgroup) is acquired. This refcnt will be consumed by
1327  * "commit()" or released by "cancel()".
1328  */
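/*
 * Illustrative call order only (a sketch; the real caller is the swap-in
 * fault path, e.g. do_swap_page() in mm/memory.c):
 *
 *	mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr);
 *	... map the page ...
 *	mem_cgroup_commit_charge_swapin(page, ptr);	on success
 *	mem_cgroup_cancel_charge_swapin(ptr);		on failure
 */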
1329 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1330                                  struct page *page,
1331                                  gfp_t mask, struct mem_cgroup **ptr)
1332 {
1333         struct mem_cgroup *mem;
1334         int ret;
1335
1336         if (mem_cgroup_disabled())
1337                 return 0;
1338
1339         if (!do_swap_account)
1340                 goto charge_cur_mm;
1341         /*
1342          * A racing thread's fault, or swapoff, may have already updated
1343          * the pte, and even removed page from swap cache: return success
1344          * to go on to do_swap_page()'s pte_same() test, which should fail.
1345          */
1346         if (!PageSwapCache(page))
1347                 return 0;
1348         mem = try_get_mem_cgroup_from_swapcache(page);
1349         if (!mem)
1350                 goto charge_cur_mm;
1351         *ptr = mem;
1352         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1353         /* drop extra refcnt from tryget */
1354         css_put(&mem->css);
1355         return ret;
1356 charge_cur_mm:
1357         if (unlikely(!mm))
1358                 mm = &init_mm;
1359         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1360 }
1361
1362 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1363 {
1364         struct page_cgroup *pc;
1365
1366         if (mem_cgroup_disabled())
1367                 return;
1368         if (!ptr)
1369                 return;
1370         pc = lookup_page_cgroup(page);
1371         mem_cgroup_lru_del_before_commit_swapcache(page);
1372         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1373         mem_cgroup_lru_add_after_commit_swapcache(page);
1374         /*
1375          * Now swap is on-memory. This means this page may be
1376          * counted both as mem and swap....double count.
1377          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1378          * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
1379          * may call delete_from_swap_cache() before reach here.
1380          */
1381         if (do_swap_account && PageSwapCache(page)) {
1382                 swp_entry_t ent = {.val = page_private(page)};
1383                 unsigned short id;
1384                 struct mem_cgroup *memcg;
1385
1386                 id = swap_cgroup_record(ent, 0);
1387                 rcu_read_lock();
1388                 memcg = mem_cgroup_lookup(id);
1389                 if (memcg) {
1390                         /*
1391                          * This recorded memcg can be obsolete one. So, avoid
1392                          * calling css_tryget
1393                          */
1394                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1395                         mem_cgroup_put(memcg);
1396                 }
1397                 rcu_read_unlock();
1398         }
1399         /* add this page(page_cgroup) to the LRU we want. */
1400
1401 }
1402
1403 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1404 {
1405         if (mem_cgroup_disabled())
1406                 return;
1407         if (!mem)
1408                 return;
1409         res_counter_uncharge(&mem->res, PAGE_SIZE);
1410         if (do_swap_account)
1411                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1412         css_put(&mem->css);
1413 }
1414
1415
1416 /*
1417  * uncharge if !page_mapped(page)
1418  */
1419 static struct mem_cgroup *
1420 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1421 {
1422         struct page_cgroup *pc;
1423         struct mem_cgroup *mem = NULL;
1424         struct mem_cgroup_per_zone *mz;
1425
1426         if (mem_cgroup_disabled())
1427                 return NULL;
1428
1429         if (PageSwapCache(page))
1430                 return NULL;
1431
1432         /*
1433          * Check if our page_cgroup is valid
1434          */
1435         pc = lookup_page_cgroup(page);
1436         if (unlikely(!pc || !PageCgroupUsed(pc)))
1437                 return NULL;
1438
1439         lock_page_cgroup(pc);
1440
1441         mem = pc->mem_cgroup;
1442
1443         if (!PageCgroupUsed(pc))
1444                 goto unlock_out;
1445
1446         switch (ctype) {
1447         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1448                 if (page_mapped(page))
1449                         goto unlock_out;
1450                 break;
1451         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1452                 if (!PageAnon(page)) {  /* Shared memory */
1453                         if (page->mapping && !page_is_file_cache(page))
1454                                 goto unlock_out;
1455                 } else if (page_mapped(page)) /* Anon */
1456                                 goto unlock_out;
1457                 break;
1458         default:
1459                 break;
1460         }
1461
1462         res_counter_uncharge(&mem->res, PAGE_SIZE);
1463         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1464                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1465         mem_cgroup_charge_statistics(mem, pc, false);
1466
1467         ClearPageCgroupUsed(pc);
1468         /*
1469          * pc->mem_cgroup is not cleared here. It will be accessed when it's
1470          * freed from the LRU. This is safe because an uncharged page is expected
1471          * not to be reused (it is freed soon). The exception is SwapCache, which
1472          * is handled by special functions.
1473          */
1474
1475         mz = page_cgroup_zoneinfo(pc);
1476         unlock_page_cgroup(pc);
1477
1478         /* at swapout, this memcg will be accessed to record to swap */
1479         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1480                 css_put(&mem->css);
1481
1482         return mem;
1483
1484 unlock_out:
1485         unlock_page_cgroup(pc);
1486         return NULL;
1487 }
1488
1489 void mem_cgroup_uncharge_page(struct page *page)
1490 {
1491         /* early check. */
1492         if (page_mapped(page))
1493                 return;
1494         if (page->mapping && !PageAnon(page))
1495                 return;
1496         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1497 }
1498
1499 void mem_cgroup_uncharge_cache_page(struct page *page)
1500 {
1501         VM_BUG_ON(page_mapped(page));
1502         VM_BUG_ON(page->mapping);
1503         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1504 }
1505
1506 /*
1507  * Called from __delete_from_swap_cache(); drops the "page" account.
1508  * The memcg information is recorded in the swap_cgroup of "ent".
1509  */
1510 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1511 {
1512         struct mem_cgroup *memcg;
1513
1514         memcg = __mem_cgroup_uncharge_common(page,
1515                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1516         /* record memcg information */
1517         if (do_swap_account && memcg) {
1518                 swap_cgroup_record(ent, css_id(&memcg->css));
1519                 mem_cgroup_get(memcg);
1520         }
1521         if (memcg)
1522                 css_put(&memcg->css);
1523 }
1524
1525 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1526 /*
1527  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1528  * uncharges the "memsw" account.
1529  */
1530 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1531 {
1532         struct mem_cgroup *memcg;
1533         unsigned short id;
1534
1535         if (!do_swap_account)
1536                 return;
1537
1538         id = swap_cgroup_record(ent, 0);
1539         rcu_read_lock();
1540         memcg = mem_cgroup_lookup(id);
1541         if (memcg) {
1542                 /*
1543                  * We uncharge this because swap is freed.
1544                  * This memcg can be obsolete one. We avoid calling css_tryget
1545                  */
1546                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1547                 mem_cgroup_put(memcg);
1548         }
1549         rcu_read_unlock();
1550 }
1551 #endif
1552
1553 /*
1554  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1555  * page belongs to.
1556  */
1557 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1558 {
1559         struct page_cgroup *pc;
1560         struct mem_cgroup *mem = NULL;
1561         int ret = 0;
1562
1563         if (mem_cgroup_disabled())
1564                 return 0;
1565
1566         pc = lookup_page_cgroup(page);
1567         lock_page_cgroup(pc);
1568         if (PageCgroupUsed(pc)) {
1569                 mem = pc->mem_cgroup;
1570                 css_get(&mem->css);
1571         }
1572         unlock_page_cgroup(pc);
1573
1574         if (mem) {
1575                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1576                 css_put(&mem->css);
1577         }
1578         *ptr = mem;
1579         return ret;
1580 }
1581
1582 /* remove redundant charge if migration failed */
1583 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1584                 struct page *oldpage, struct page *newpage)
1585 {
1586         struct page *target, *unused;
1587         struct page_cgroup *pc;
1588         enum charge_type ctype;
1589
1590         if (!mem)
1591                 return;
1592
1593         /* on migration success, oldpage->mapping is NULL. */
1594         if (oldpage->mapping) {
1595                 target = oldpage;
1596                 unused = NULL;
1597         } else {
1598                 target = newpage;
1599                 unused = oldpage;
1600         }
1601
1602         if (PageAnon(target))
1603                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1604         else if (page_is_file_cache(target))
1605                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1606         else
1607                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1608
1609         /* the unused page is no longer on the radix-tree. */
1610         if (unused)
1611                 __mem_cgroup_uncharge_common(unused, ctype);
1612
1613         pc = lookup_page_cgroup(target);
1614         /*
1615          * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup,
1616          * so double-counting is effectively avoided.
1617          */
1618         __mem_cgroup_commit_charge(mem, pc, ctype);
1619
1620         /*
1621          * Both oldpage and newpage are still under lock_page(), so we
1622          * don't have to worry about races in the radix-tree. But we do
1623          * have to check whether the page is still mapped.
1624          *
1625          * !page_mapped() can happen: at the start of migration oldpage
1626          * was mapped, but it may have been zapped since then. We know the
1627          * *target* page is not freed/reused under us, and
1628          * mem_cgroup_uncharge_page() does all the necessary checks.
1629          */
1630         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1631                 mem_cgroup_uncharge_page(target);
1632 }
1633
1634 /*
1635  * Try to shrink memory usage under the specified resource controller.
1636  * This is typically used for shmem page reclaim, to reduce the side
1637  * effects of page allocations from shmem on some mem_cgroup.
1638  */
1639 int mem_cgroup_shrink_usage(struct page *page,
1640                             struct mm_struct *mm,
1641                             gfp_t gfp_mask)
1642 {
1643         struct mem_cgroup *mem = NULL;
1644         int progress = 0;
1645         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1646
1647         if (mem_cgroup_disabled())
1648                 return 0;
1649         if (page)
1650                 mem = try_get_mem_cgroup_from_swapcache(page);
1651         if (!mem && mm)
1652                 mem = try_get_mem_cgroup_from_mm(mm);
1653         if (unlikely(!mem))
1654                 return 0;
1655
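             /* retry reclaim until some progress is made or the retries run out */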
1656         do {
1657                 progress = mem_cgroup_hierarchical_reclaim(mem,
1658                                         gfp_mask, true, false);
1659                 progress += mem_cgroup_check_under_limit(mem);
1660         } while (!progress && --retry);
1661
1662         css_put(&mem->css);
1663         if (!retry)
1664                 return -ENOMEM;
1665         return 0;
1666 }
1667
1668 static DEFINE_MUTEX(set_limit_mutex);
1669
1670 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1671                                 unsigned long long val)
1672 {
1673         int retry_count;
1674         int progress;
1675         u64 memswlimit;
1676         int ret = 0;
1677         int children = mem_cgroup_count_children(memcg);
1678         u64 curusage, oldusage;
1679
1680         /*
1681          * To keep hierarchical_reclaim simple, how long we should retry
1682          * is left to the callers. We set our retry count to be a function
1683          * of the number of children we have to visit in this loop.
1684          */
1685         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1686
1687         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1688
1689         while (retry_count) {
1690                 if (signal_pending(current)) {
1691                         ret = -EINTR;
1692                         break;
1693                 }
1694                 /*
1695                  * Rather than hiding this in some helper, do it open-coded
1696                  * so that what really happens is visible: we must guarantee
1697                  * mem->res.limit <= mem->memsw.limit.
1698                  */
1699                 mutex_lock(&set_limit_mutex);
1700                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1701                 if (memswlimit < val) {
1702                         ret = -EINVAL;
1703                         mutex_unlock(&set_limit_mutex);
1704                         break;
1705                 }
1706                 ret = res_counter_set_limit(&memcg->res, val);
1707                 mutex_unlock(&set_limit_mutex);
1708
1709                 if (!ret)
1710                         break;
1711
1712                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1713                                                    false, true);
1714                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1715                 /* Was the usage reduced? */
1716                 if (curusage >= oldusage)
1717                         retry_count--;
1718                 else
1719                         oldusage = curusage;
1720         }
1721
1722         return ret;
1723 }
1724
1725 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1726                                 unsigned long long val)
1727 {
1728         int retry_count;
1729         u64 memlimit, oldusage, curusage;
1730         int children = mem_cgroup_count_children(memcg);
1731         int ret = -EBUSY;
1732
1733         if (!do_swap_account)
1734                 return -EINVAL;
1735         /* see mem_cgroup_resize_limit() */
1736         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1737         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1738         while (retry_count) {
1739                 if (signal_pending(current)) {
1740                         ret = -EINTR;
1741                         break;
1742                 }
1743                 /*
1744                  * Rather than hiding this in some helper, do it open-coded
1745                  * so that what really happens is visible: we must guarantee
1746                  * mem->res.limit <= mem->memsw.limit.
1747                  */
1748                 mutex_lock(&set_limit_mutex);
1749                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1750                 if (memlimit > val) {
1751                         ret = -EINVAL;
1752                         mutex_unlock(&set_limit_mutex);
1753                         break;
1754                 }
1755                 ret = res_counter_set_limit(&memcg->memsw, val);
1756                 mutex_unlock(&set_limit_mutex);
1757
1758                 if (!ret)
1759                         break;
1760
1761                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1762                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1763                 /* Was the usage reduced? */
1764                 if (curusage >= oldusage)
1765                         retry_count--;
1766                 else
1767                         oldusage = curusage;
1768         }
1769         return ret;
1770 }
1771
1772 /*
1773  * This routine traverses the page_cgroups on the given list and drops them all.
1774  * Note that it doesn't reclaim the pages themselves; it just removes the page_cgroups.
1775  */
1776 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1777                                 int node, int zid, enum lru_list lru)
1778 {
1779         struct zone *zone;
1780         struct mem_cgroup_per_zone *mz;
1781         struct page_cgroup *pc, *busy;
1782         unsigned long flags, loop;
1783         struct list_head *list;
1784         int ret = 0;
1785
1786         zone = &NODE_DATA(node)->node_zones[zid];
1787         mz = mem_cgroup_zoneinfo(mem, node, zid);
1788         list = &mz->lists[lru];
1789
1790         loop = MEM_CGROUP_ZSTAT(mz, lru);
1791         /* allow some margin for -EBUSY etc. */
1792         loop += 256;
1793         busy = NULL;
1794         while (loop--) {
1795                 ret = 0;
1796                 spin_lock_irqsave(&zone->lru_lock, flags);
1797                 if (list_empty(list)) {
1798                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1799                         break;
1800                 }
1801                 pc = list_entry(list->prev, struct page_cgroup, lru);
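                     /*
                      * If this is the same page_cgroup we failed on last time,
                      * rotate it to the head of the list and try the next entry.
                      */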
1802                 if (busy == pc) {
1803                         list_move(&pc->lru, list);
1804                         busy = NULL;
1805                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1806                         continue;
1807                 }
1808                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1809
1810                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1811                 if (ret == -ENOMEM)
1812                         break;
1813
1814                 if (ret == -EBUSY || ret == -EINVAL) {
1815                         /* found lock contention or "pc" is obsolete. */
1816                         busy = pc;
1817                         cond_resched();
1818                 } else
1819                         busy = NULL;
1820         }
1821
1822         if (!ret && !list_empty(list))
1823                 return -EBUSY;
1824         return ret;
1825 }
1826
1827 /*
1828  * Make the mem_cgroup's charge 0 if there are no tasks in it.
1829  * This makes it possible to delete this mem_cgroup.
1830  */
1831 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1832 {
1833         int ret;
1834         int node, zid, shrink;
1835         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1836         struct cgroup *cgrp = mem->css.cgroup;
1837
1838         css_get(&mem->css);
1839
1840         shrink = 0;
1841         /* should we free everything? */
1842         if (free_all)
1843                 goto try_to_free;
1844 move_account:
1845         while (mem->res.usage > 0) {
1846                 ret = -EBUSY;
1847                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1848                         goto out;
1849                 ret = -EINTR;
1850                 if (signal_pending(current))
1851                         goto out;
1852                 /* This makes sure all *used* pages are on an LRU list. */
1853                 lru_add_drain_all();
1854                 ret = 0;
1855                 for_each_node_state(node, N_HIGH_MEMORY) {
1856                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1857                                 enum lru_list l;
1858                                 for_each_lru(l) {
1859                                         ret = mem_cgroup_force_empty_list(mem,
1860                                                         node, zid, l);
1861                                         if (ret)
1862                                                 break;
1863                                 }
1864                         }
1865                         if (ret)
1866                                 break;
1867                 }
1868                 /* it seems the parent cgroup doesn't have enough memory */
1869                 if (ret == -ENOMEM)
1870                         goto try_to_free;
1871                 cond_resched();
1872         }
1873         ret = 0;
1874 out:
1875         css_put(&mem->css);
1876         return ret;
1877
1878 try_to_free:
1879         /* returns -EBUSY if there is a task, or if we come here twice. */
1880         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1881                 ret = -EBUSY;
1882                 goto out;
1883         }
1884         /* we call try-to-free pages to make this cgroup empty */
1885         lru_add_drain_all();
1886         /* try to free all pages in this cgroup */
1887         shrink = 1;
1888         while (nr_retries && mem->res.usage > 0) {
1889                 int progress;
1890
1891                 if (signal_pending(current)) {
1892                         ret = -EINTR;
1893                         goto out;
1894                 }
1895                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1896                                                 false, get_swappiness(mem));
1897                 if (!progress) {
1898                         nr_retries--;
1899                         /* maybe some writeback is necessary */
1900                         congestion_wait(WRITE, HZ/10);
1901                 }
1902
1903         }
1904         lru_add_drain();
1905         /* try move_account again; there may be some *locked* pages. */
1906         if (mem->res.usage)
1907                 goto move_account;
1908         ret = 0;
1909         goto out;
1910 }
1911
1912 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1913 {
1914         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1915 }
1916
1917
1918 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1919 {
1920         return mem_cgroup_from_cont(cont)->use_hierarchy;
1921 }
1922
1923 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1924                                         u64 val)
1925 {
1926         int retval = 0;
1927         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1928         struct cgroup *parent = cont->parent;
1929         struct mem_cgroup *parent_mem = NULL;
1930
1931         if (parent)
1932                 parent_mem = mem_cgroup_from_cont(parent);
1933
1934         cgroup_lock();
1935         /*
1936          * If parent's use_hierarchy is set, we can't make any modifications
1937          * in the child subtrees. If it is unset, then the change can
1938          * occur, provided the current cgroup has no children.
1939          *
1940          * For the root cgroup, parent_mem is NULL; we allow the value to be
1941          * set if there are no children.
1942          */
1943         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1944                                 (val == 1 || val == 0)) {
1945                 if (list_empty(&cont->children))
1946                         mem->use_hierarchy = val;
1947                 else
1948                         retval = -EBUSY;
1949         } else
1950                 retval = -EINVAL;
1951         cgroup_unlock();
1952
1953         return retval;
1954 }
1955
1956 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1957 {
1958         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1959         u64 val = 0;
1960         int type, name;
1961
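             /* cft->private packs the counter type (_MEM/_MEMSWAP) and the RES_* attribute */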
1962         type = MEMFILE_TYPE(cft->private);
1963         name = MEMFILE_ATTR(cft->private);
1964         switch (type) {
1965         case _MEM:
1966                 val = res_counter_read_u64(&mem->res, name);
1967                 break;
1968         case _MEMSWAP:
1969                 if (do_swap_account)
1970                         val = res_counter_read_u64(&mem->memsw, name);
1971                 break;
1972         default:
1973                 BUG();
1974                 break;
1975         }
1976         return val;
1977 }
1978 /*
1979  * The only users of this function are the RES_LIMIT
1980  * (limit_in_bytes) files.
1981  */
1982 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1983                             const char *buffer)
1984 {
1985         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1986         int type, name;
1987         unsigned long long val;
1988         int ret;
1989
1990         type = MEMFILE_TYPE(cft->private);
1991         name = MEMFILE_ATTR(cft->private);
1992         switch (name) {
1993         case RES_LIMIT:
1994                 /* This function does all the necessary parsing; reuse it. */
1995                 ret = res_counter_memparse_write_strategy(buffer, &val);
1996                 if (ret)
1997                         break;
1998                 if (type == _MEM)
1999                         ret = mem_cgroup_resize_limit(memcg, val);
2000                 else
2001                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
2002                 break;
2003         default:
2004                 ret = -EINVAL; /* should be BUG() ? */
2005                 break;
2006         }
2007         return ret;
2008 }
2009
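     /*
      * Walk up from @memcg towards the root and report the smallest memory
      * and memsw limits seen on the way, i.e. the effective hierarchical limits.
      */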
2010 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
2011                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
2012 {
2013         struct cgroup *cgroup;
2014         unsigned long long min_limit, min_memsw_limit, tmp;
2015
2016         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2017         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2018         cgroup = memcg->css.cgroup;
2019         if (!memcg->use_hierarchy)
2020                 goto out;
2021
2022         while (cgroup->parent) {
2023                 cgroup = cgroup->parent;
2024                 memcg = mem_cgroup_from_cont(cgroup);
2025                 if (!memcg->use_hierarchy)
2026                         break;
2027                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
2028                 min_limit = min(min_limit, tmp);
2029                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2030                 min_memsw_limit = min(min_memsw_limit, tmp);
2031         }
2032 out:
2033         *mem_limit = min_limit;
2034         *memsw_limit = min_memsw_limit;
2035         return;
2036 }
2037
2038 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2039 {
2040         struct mem_cgroup *mem;
2041         int type, name;
2042
2043         mem = mem_cgroup_from_cont(cont);
2044         type = MEMFILE_TYPE(event);
2045         name = MEMFILE_ATTR(event);
2046         switch (name) {
2047         case RES_MAX_USAGE:
2048                 if (type == _MEM)
2049                         res_counter_reset_max(&mem->res);
2050                 else
2051                         res_counter_reset_max(&mem->memsw);
2052                 break;
2053         case RES_FAILCNT:
2054                 if (type == _MEM)
2055                         res_counter_reset_failcnt(&mem->res);
2056                 else
2057                         res_counter_reset_failcnt(&mem->memsw);
2058                 break;
2059         }
2060         return 0;
2061 }
2062
2063
2064 /* For reading statistics */
2065 enum {
2066         MCS_CACHE,
2067         MCS_RSS,
2068         MCS_PGPGIN,
2069         MCS_PGPGOUT,
2070         MCS_INACTIVE_ANON,
2071         MCS_ACTIVE_ANON,
2072         MCS_INACTIVE_FILE,
2073         MCS_ACTIVE_FILE,
2074         MCS_UNEVICTABLE,
2075         NR_MCS_STAT,
2076 };
2077
2078 struct mcs_total_stat {
2079         s64 stat[NR_MCS_STAT];
2080 };
2081
2082 static struct {
2083         char *local_name;
2084         char *total_name;
2085 } memcg_stat_strings[NR_MCS_STAT] = {
2086         {"cache", "total_cache"},
2087         {"rss", "total_rss"},
2088         {"pgpgin", "total_pgpgin"},
2089         {"pgpgout", "total_pgpgout"},
2090         {"inactive_anon", "total_inactive_anon"},
2091         {"active_anon", "total_active_anon"},
2092         {"inactive_file", "total_inactive_file"},
2093         {"active_file", "total_active_file"},
2094         {"unevictable", "total_unevictable"}
2095 };
2096
2097
2098 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2099 {
2100         struct mcs_total_stat *s = data;
2101         s64 val;
2102
2103         /* per cpu stat */
2104         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2105         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2106         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2107         s->stat[MCS_RSS] += val * PAGE_SIZE;
2108         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2109         s->stat[MCS_PGPGIN] += val;
2110         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2111         s->stat[MCS_PGPGOUT] += val;
2112
2113         /* per zone stat */
2114         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2115         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2116         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2117         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2118         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2119         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2120         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2121         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2122         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2123         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2124         return 0;
2125 }
2126
2127 static void
2128 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2129 {
2130         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2131 }
2132
2133 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2134                                  struct cgroup_map_cb *cb)
2135 {
2136         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2137         struct mcs_total_stat mystat;
2138         int i;
2139
2140         memset(&mystat, 0, sizeof(mystat));
2141         mem_cgroup_get_local_stat(mem_cont, &mystat);
2142
2143         for (i = 0; i < NR_MCS_STAT; i++)
2144                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2145
2146         /* Hierarchical information */
2147         {
2148                 unsigned long long limit, memsw_limit;
2149                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2150                 cb->fill(cb, "hierarchical_memory_limit", limit);
2151                 if (do_swap_account)
2152                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2153         }
2154
2155         memset(&mystat, 0, sizeof(mystat));
2156         mem_cgroup_get_total_stat(mem_cont, &mystat);
2157         for (i = 0; i < NR_MCS_STAT; i++)
2158                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2159
2160
2161 #ifdef CONFIG_DEBUG_VM
2162         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2163
2164         {
2165                 int nid, zid;
2166                 struct mem_cgroup_per_zone *mz;
2167                 unsigned long recent_rotated[2] = {0, 0};
2168                 unsigned long recent_scanned[2] = {0, 0};
2169
2170                 for_each_online_node(nid)
2171                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2172                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2173
2174                                 recent_rotated[0] +=
2175                                         mz->reclaim_stat.recent_rotated[0];
2176                                 recent_rotated[1] +=
2177                                         mz->reclaim_stat.recent_rotated[1];
2178                                 recent_scanned[0] +=
2179                                         mz->reclaim_stat.recent_scanned[0];
2180                                 recent_scanned[1] +=
2181                                         mz->reclaim_stat.recent_scanned[1];
2182                         }
2183                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2184                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2185                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2186                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2187         }
2188 #endif
2189
2190         return 0;
2191 }
2192
2193 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2194 {
2195         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2196
2197         return get_swappiness(memcg);
2198 }
2199
2200 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2201                                        u64 val)
2202 {
2203         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2204         struct mem_cgroup *parent;
2205
2206         if (val > 100)
2207                 return -EINVAL;
2208
2209         if (cgrp->parent == NULL)
2210                 return -EINVAL;
2211
2212         parent = mem_cgroup_from_cont(cgrp->parent);
2213
2214         cgroup_lock();
2215
2216         /* If under hierarchy, only empty-root can set this value */
2217         if ((parent->use_hierarchy) ||
2218             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2219                 cgroup_unlock();
2220                 return -EINVAL;
2221         }
2222
2223         spin_lock(&memcg->reclaim_param_lock);
2224         memcg->swappiness = val;
2225         spin_unlock(&memcg->reclaim_param_lock);
2226
2227         cgroup_unlock();
2228
2229         return 0;
2230 }
2231
2232
2233 static struct cftype mem_cgroup_files[] = {
2234         {
2235                 .name = "usage_in_bytes",
2236                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2237                 .read_u64 = mem_cgroup_read,
2238         },
2239         {
2240                 .name = "max_usage_in_bytes",
2241                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2242                 .trigger = mem_cgroup_reset,
2243                 .read_u64 = mem_cgroup_read,
2244         },
2245         {
2246                 .name = "limit_in_bytes",
2247                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2248                 .write_string = mem_cgroup_write,
2249                 .read_u64 = mem_cgroup_read,
2250         },
2251         {
2252                 .name = "failcnt",
2253                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2254                 .trigger = mem_cgroup_reset,
2255                 .read_u64 = mem_cgroup_read,
2256         },
2257         {
2258                 .name = "stat",
2259                 .read_map = mem_control_stat_show,
2260         },
2261         {
2262                 .name = "force_empty",
2263                 .trigger = mem_cgroup_force_empty_write,
2264         },
2265         {
2266                 .name = "use_hierarchy",
2267                 .write_u64 = mem_cgroup_hierarchy_write,
2268                 .read_u64 = mem_cgroup_hierarchy_read,
2269         },
2270         {
2271                 .name = "swappiness",
2272                 .read_u64 = mem_cgroup_swappiness_read,
2273                 .write_u64 = mem_cgroup_swappiness_write,
2274         },
2275 };
2276
2277 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2278 static struct cftype memsw_cgroup_files[] = {
2279         {
2280                 .name = "memsw.usage_in_bytes",
2281                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2282                 .read_u64 = mem_cgroup_read,
2283         },
2284         {
2285                 .name = "memsw.max_usage_in_bytes",
2286                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2287                 .trigger = mem_cgroup_reset,
2288                 .read_u64 = mem_cgroup_read,
2289         },
2290         {
2291                 .name = "memsw.limit_in_bytes",
2292                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2293                 .write_string = mem_cgroup_write,
2294                 .read_u64 = mem_cgroup_read,
2295         },
2296         {
2297                 .name = "memsw.failcnt",
2298                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2299                 .trigger = mem_cgroup_reset,
2300                 .read_u64 = mem_cgroup_read,
2301         },
2302 };
2303
2304 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2305 {
2306         if (!do_swap_account)
2307                 return 0;
2308         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2309                                 ARRAY_SIZE(memsw_cgroup_files));
2310 }
2311 #else
2312 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2313 {
2314         return 0;
2315 }
2316 #endif
2317
2318 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2319 {
2320         struct mem_cgroup_per_node *pn;
2321         struct mem_cgroup_per_zone *mz;
2322         enum lru_list l;
2323         int zone, tmp = node;
2324         /*
2325          * This routine is called for all possible nodes, but it is a
2326          * BUG to call kmalloc() against an offline node.
2327          *
2328          * TODO: this routine can waste a lot of memory for nodes that will
2329          *       never be onlined. It would be better to use a memory
2330          *       hotplug callback function.
2331          */
2332         if (!node_state(node, N_NORMAL_MEMORY))
2333                 tmp = -1;
2334         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2335         if (!pn)
2336                 return 1;
2337
2338         mem->info.nodeinfo[node] = pn;
2339         memset(pn, 0, sizeof(*pn));
2340
2341         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2342                 mz = &pn->zoneinfo[zone];
2343                 for_each_lru(l)
2344                         INIT_LIST_HEAD(&mz->lists[l]);
2345         }
2346         return 0;
2347 }
2348
2349 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2350 {
2351         kfree(mem->info.nodeinfo[node]);
2352 }
2353
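     /*
      * struct mem_cgroup is followed immediately by its per-cpu statistics
      * array, one struct mem_cgroup_stat_cpu entry per possible CPU id.
      */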
2354 static int mem_cgroup_size(void)
2355 {
2356         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2357         return sizeof(struct mem_cgroup) + cpustat_size;
2358 }
2359
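     /*
      * Use kmalloc() while the structure fits in a page and fall back to
      * vmalloc() otherwise; __mem_cgroup_free() mirrors this choice.
      */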
2360 static struct mem_cgroup *mem_cgroup_alloc(void)
2361 {
2362         struct mem_cgroup *mem;
2363         int size = mem_cgroup_size();
2364
2365         if (size < PAGE_SIZE)
2366                 mem = kmalloc(size, GFP_KERNEL);
2367         else
2368                 mem = vmalloc(size);
2369
2370         if (mem)
2371                 memset(mem, 0, size);
2372         return mem;
2373 }
2374
2375 /*
2376  * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
2377  * (Scanning them all at force_empty would be too costly...)
2378  *
2379  * Instead of clearing all references at force_empty, we remember the
2380  * number of references from swap_cgroup and free the mem_cgroup when
2381  * that count goes down to 0.
2382  *
2383  * Removal of the cgroup itself succeeds regardless of refs from swap.
2384  */
2385
2386 static void __mem_cgroup_free(struct mem_cgroup *mem)
2387 {
2388         int node;
2389
2390         free_css_id(&mem_cgroup_subsys, &mem->css);
2391
2392         for_each_node_state(node, N_POSSIBLE)
2393                 free_mem_cgroup_per_zone_info(mem, node);
2394
2395         if (mem_cgroup_size() < PAGE_SIZE)
2396                 kfree(mem);
2397         else
2398                 vfree(mem);
2399 }
2400
2401 static void mem_cgroup_get(struct mem_cgroup *mem)
2402 {
2403         atomic_inc(&mem->refcnt);
2404 }
2405
2406 static void mem_cgroup_put(struct mem_cgroup *mem)
2407 {
2408         if (atomic_dec_and_test(&mem->refcnt)) {
2409                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
2410                 __mem_cgroup_free(mem);
2411                 if (parent)
2412                         mem_cgroup_put(parent);
2413         }
2414 }
2415
2416 /*
2417  * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
2418  */
2419 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2420 {
2421         if (!mem->res.parent)
2422                 return NULL;
2423         return mem_cgroup_from_res_counter(mem->res.parent, res);
2424 }
2425
2426 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2427 static void __init enable_swap_cgroup(void)
2428 {
2429         if (!mem_cgroup_disabled() && really_do_swap_account)
2430                 do_swap_account = 1;
2431 }
2432 #else
2433 static void __init enable_swap_cgroup(void)
2434 {
2435 }
2436 #endif
2437
2438 static struct cgroup_subsys_state * __ref
2439 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2440 {
2441         struct mem_cgroup *mem, *parent;
2442         long error = -ENOMEM;
2443         int node;
2444
2445         mem = mem_cgroup_alloc();
2446         if (!mem)
2447                 return ERR_PTR(error);
2448
2449         for_each_node_state(node, N_POSSIBLE)
2450                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2451                         goto free_out;
2452         /* root ? */
2453         if (cont->parent == NULL) {
2454                 enable_swap_cgroup();
2455                 parent = NULL;
2456         } else {
2457                 parent = mem_cgroup_from_cont(cont->parent);
2458                 mem->use_hierarchy = parent->use_hierarchy;
2459         }
2460
2461         if (parent && parent->use_hierarchy) {
2462                 res_counter_init(&mem->res, &parent->res);
2463                 res_counter_init(&mem->memsw, &parent->memsw);
2464                 /*
2465                  * We increment refcnt of the parent to ensure that we can
2466                  * safely access it on res_counter_charge/uncharge.
2467                  * This refcnt will be decremented when freeing this
2468                  * mem_cgroup (see mem_cgroup_put()).
2469                  */
2470                 mem_cgroup_get(parent);
2471         } else {
2472                 res_counter_init(&mem->res, NULL);
2473                 res_counter_init(&mem->memsw, NULL);
2474         }
2475         mem->last_scanned_child = 0;
2476         spin_lock_init(&mem->reclaim_param_lock);
2477
2478         if (parent)
2479                 mem->swappiness = get_swappiness(parent);
2480         atomic_set(&mem->refcnt, 1);
2481         return &mem->css;
2482 free_out:
2483         __mem_cgroup_free(mem);
2484         return ERR_PTR(error);
2485 }
2486
2487 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2488                                         struct cgroup *cont)
2489 {
2490         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2491
2492         return mem_cgroup_force_empty(mem, false);
2493 }
2494
2495 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2496                                 struct cgroup *cont)
2497 {
2498         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2499
2500         mem_cgroup_put(mem);
2501 }
2502
2503 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2504                                 struct cgroup *cont)
2505 {
2506         int ret;
2507
2508         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2509                                 ARRAY_SIZE(mem_cgroup_files));
2510
2511         if (!ret)
2512                 ret = register_memsw_files(cont, ss);
2513         return ret;
2514 }
2515
2516 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2517                                 struct cgroup *cont,
2518                                 struct cgroup *old_cont,
2519                                 struct task_struct *p)
2520 {
2521         mutex_lock(&memcg_tasklist);
2522         /*
2523          * FIXME: It would be better to move this process's charges from the
2524          * old memcg to the new one, but that is still on the TODO list.
2525          */
2526         mutex_unlock(&memcg_tasklist);
2527 }
2528
2529 struct cgroup_subsys mem_cgroup_subsys = {
2530         .name = "memory",
2531         .subsys_id = mem_cgroup_subsys_id,
2532         .create = mem_cgroup_create,
2533         .pre_destroy = mem_cgroup_pre_destroy,
2534         .destroy = mem_cgroup_destroy,
2535         .populate = mem_cgroup_populate,
2536         .attach = mem_cgroup_move_task,
2537         .early_init = 0,
2538         .use_id = 1,
2539 };
2540
2541 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2542
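     /*
      * "noswapaccount" on the kernel command line disables swap accounting
      * even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is enabled.
      */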
2543 static int __init disable_swap_account(char *s)
2544 {
2545         really_do_swap_account = 0;
2546         return 1;
2547 }
2548 __setup("noswapaccount", disable_swap_account);
2549 #endif