1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/slab.h>
33 #include <linux/swap.h>
34 #include <linux/spinlock.h>
35 #include <linux/fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/vmalloc.h>
38 #include <linux/mm_inline.h>
39 #include <linux/page_cgroup.h>
40 #include "internal.h"
41
42 #include <asm/uaccess.h>
43
44 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45 #define MEM_CGROUP_RECLAIM_RETRIES      5
46
47 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
48 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
49 int do_swap_account __read_mostly;
50 static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
51 #else
52 #define do_swap_account         (0)
53 #endif
54
55 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
56
57 /*
58  * Statistics for memory cgroup.
59  */
60 enum mem_cgroup_stat_index {
61         /*
62          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
63          */
64         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
65         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
66         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
67         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
68
69         MEM_CGROUP_STAT_NSTATS,
70 };
71
72 struct mem_cgroup_stat_cpu {
73         s64 count[MEM_CGROUP_STAT_NSTATS];
74 } ____cacheline_aligned_in_smp;
75
76 struct mem_cgroup_stat {
77         struct mem_cgroup_stat_cpu cpustat[0];
78 };
79
80 /*
81  * For accounting under irq disable, there is no need to increment the preempt count.
82  */
83 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
84                 enum mem_cgroup_stat_index idx, int val)
85 {
86         stat->count[idx] += val;
87 }
88
89 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
90                 enum mem_cgroup_stat_index idx)
91 {
92         int cpu;
93         s64 ret = 0;
94         for_each_possible_cpu(cpu)
95                 ret += stat->cpustat[cpu].count[idx];
96         return ret;
97 }
98
99 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
100 {
101         s64 ret;
102
103         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
104         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
105         return ret;
106 }
107
108 /*
109  * per-zone information in memory controller.
110  */
111 struct mem_cgroup_per_zone {
112         /*
113          * spin_lock to protect the per cgroup LRU
114          */
115         struct list_head        lists[NR_LRU_LISTS];
116         unsigned long           count[NR_LRU_LISTS];
117
118         struct zone_reclaim_stat reclaim_stat;
119 };
120 /* Macro for accessing counter */
121 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
122
123 struct mem_cgroup_per_node {
124         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
125 };
126
127 struct mem_cgroup_lru_info {
128         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
129 };
130
131 /*
132  * The memory controller data structure. The memory controller controls both
133  * page cache and RSS per cgroup. We would eventually like to provide
134  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
135  * to help the administrator determine what knobs to tune.
136  *
137  * TODO: Add a water mark for the memory controller. Reclaim will begin when
138  * we hit the water mark. Maybe even add a low water mark, such that
139  * no reclaim occurs from a cgroup at its low water mark; this is
140  * a feature that will be implemented much later in the future.
141  */
142 struct mem_cgroup {
143         struct cgroup_subsys_state css;
144         /*
145          * the counter to account for memory usage
146          */
147         struct res_counter res;
148         /*
149          * the counter to account for mem+swap usage.
150          */
151         struct res_counter memsw;
152         /*
153          * Per cgroup active and inactive list, similar to the
154          * per zone LRU lists.
155          */
156         struct mem_cgroup_lru_info info;
157
158         /*
159          * protects reclaim-related members.
160         */
161         spinlock_t reclaim_param_lock;
162
163         int     prev_priority;  /* for recording reclaim priority */
164
165         /*
166          * While reclaiming in a hierarchy, we cache the last child we
167          * reclaimed from.
168          */
169         int last_scanned_child;
170         /*
171          * Should the accounting and control be hierarchical, per subtree?
172          */
173         bool use_hierarchy;
174         unsigned long   last_oom_jiffies;
175         atomic_t        refcnt;
176
177         unsigned int    swappiness;
178
179         /*
180          * statistics. This must be placed at the end of memcg.
181          */
182         struct mem_cgroup_stat stat;
183 };
184
185 enum charge_type {
186         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
187         MEM_CGROUP_CHARGE_TYPE_MAPPED,
188         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
189         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
190         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
191         NR_CHARGE_TYPE,
192 };
193
194 /* only for here (for easy reading.) */
195 #define PCGF_CACHE      (1UL << PCG_CACHE)
196 #define PCGF_USED       (1UL << PCG_USED)
197 #define PCGF_LOCK       (1UL << PCG_LOCK)
198 static const unsigned long
199 pcg_default_flags[NR_CHARGE_TYPE] = {
200         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
201         PCGF_USED | PCGF_LOCK, /* Anon */
202         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
203         0, /* FORCE */
204 };
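/*
 * Illustrative example: __mem_cgroup_commit_charge() further below installs
 * one of these masks as the initial pc->flags of a newly charged page_cgroup.
 * An anonymous page charged as MEM_CGROUP_CHARGE_TYPE_MAPPED, for instance,
 * gets
 *
 *	pc->flags = pcg_default_flags[MEM_CGROUP_CHARGE_TYPE_MAPPED];
 *
 * which expands to PCGF_USED | PCGF_LOCK (used and locked, but not cache).
 */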
205
206 /* for encoding cft->private value on file */
207 #define _MEM                    (0)
208 #define _MEMSWAP                (1)
209 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
210 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
211 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
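/*
 * Encoding sketch (illustrative): a cftype entry for, say, the mem+swap
 * limit file would set
 *
 *	.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 *
 * and its read/write handlers would recover the two halves with
 * MEMFILE_TYPE(cft->private) == _MEMSWAP and
 * MEMFILE_ATTR(cft->private) == RES_LIMIT.
 */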
212
213 static void mem_cgroup_get(struct mem_cgroup *mem);
214 static void mem_cgroup_put(struct mem_cgroup *mem);
215 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
216
217 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
218                                          struct page_cgroup *pc,
219                                          bool charge)
220 {
221         int val = (charge)? 1 : -1;
222         struct mem_cgroup_stat *stat = &mem->stat;
223         struct mem_cgroup_stat_cpu *cpustat;
224         int cpu = get_cpu();
225
226         cpustat = &stat->cpustat[cpu];
227         if (PageCgroupCache(pc))
228                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
229         else
230                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
231
232         if (charge)
233                 __mem_cgroup_stat_add_safe(cpustat,
234                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
235         else
236                 __mem_cgroup_stat_add_safe(cpustat,
237                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
238         put_cpu();
239 }
240
241 static struct mem_cgroup_per_zone *
242 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
243 {
244         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
245 }
246
247 static struct mem_cgroup_per_zone *
248 page_cgroup_zoneinfo(struct page_cgroup *pc)
249 {
250         struct mem_cgroup *mem = pc->mem_cgroup;
251         int nid = page_cgroup_nid(pc);
252         int zid = page_cgroup_zid(pc);
253
254         if (!mem)
255                 return NULL;
256
257         return mem_cgroup_zoneinfo(mem, nid, zid);
258 }
259
260 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
261                                         enum lru_list idx)
262 {
263         int nid, zid;
264         struct mem_cgroup_per_zone *mz;
265         u64 total = 0;
266
267         for_each_online_node(nid)
268                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
269                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
270                         total += MEM_CGROUP_ZSTAT(mz, idx);
271                 }
272         return total;
273 }
274
275 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
276 {
277         return container_of(cgroup_subsys_state(cont,
278                                 mem_cgroup_subsys_id), struct mem_cgroup,
279                                 css);
280 }
281
282 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
283 {
284         /*
285          * mm_update_next_owner() may clear mm->owner to NULL
286          * if it races with swapoff, page migration, etc.
287          * So this can be called with p == NULL.
288          */
289         if (unlikely(!p))
290                 return NULL;
291
292         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
293                                 struct mem_cgroup, css);
294 }
295
296 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
297 {
298         struct mem_cgroup *mem = NULL;
299
300         if (!mm)
301                 return NULL;
302         /*
303          * Because we have no locks, mm->owner may be being moved to another
304          * cgroup. We use css_tryget() here even if this looks
305          * pessimistic (rather than adding locks here).
306          */
307         rcu_read_lock();
308         do {
309                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
310                 if (unlikely(!mem))
311                         break;
312         } while (!css_tryget(&mem->css));
313         rcu_read_unlock();
314         return mem;
315 }
316
317 static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
318 {
319         if (!mem)
320                 return true;
321         return css_is_removed(&mem->css);
322 }
323
324
325 /*
326  * Call the callback function against all cgroups under the hierarchy tree.
327  */
328 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
329                           int (*func)(struct mem_cgroup *, void *))
330 {
331         int found, ret, nextid;
332         struct cgroup_subsys_state *css;
333         struct mem_cgroup *mem;
334
335         if (!root->use_hierarchy)
336                 return (*func)(root, data);
337
338         nextid = 1;
339         do {
340                 ret = 0;
341                 mem = NULL;
342
343                 rcu_read_lock();
344                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
345                                    &found);
346                 if (css && css_tryget(css))
347                         mem = container_of(css, struct mem_cgroup, css);
348                 rcu_read_unlock();
349
350                 if (mem) {
351                         ret = (*func)(mem, data);
352                         css_put(&mem->css);
353                 }
354                 nextid = found + 1;
355         } while (!ret && css);
356
357         return ret;
358 }
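/*
 * Usage sketch: callers pass a callback that is invoked once per memcg in the
 * subtree (or only for @root when hierarchy is disabled); a non-zero return
 * value from the callback stops the walk early.  mem_cgroup_count_children()
 * further below is a minimal example:
 *
 *	int num = 0;
 *	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
 */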
359
360 /*
361  * Following LRU functions are allowed to be used without PCG_LOCK.
362  * Operations are called by routine of global LRU independently from memcg.
363  * What we have to take care of here is validness of pc->mem_cgroup.
364  *
365  * Changes to pc->mem_cgroup happens when
366  * 1. charge
367  * 2. moving account
368  * In the typical case, "charge" is done before add-to-lru. The exception is
369  * SwapCache, which is added to the LRU before it is charged.
370  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
371  * When moving account, the page is not on LRU. It's isolated.
372  */
373
374 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
375 {
376         struct page_cgroup *pc;
377         struct mem_cgroup *mem;
378         struct mem_cgroup_per_zone *mz;
379
380         if (mem_cgroup_disabled())
381                 return;
382         pc = lookup_page_cgroup(page);
383         /* can happen while we handle swapcache. */
384         if (list_empty(&pc->lru) || !pc->mem_cgroup)
385                 return;
386         /*
387          * We don't check PCG_USED bit. It's cleared when the "page" is finally
388          * removed from global LRU.
389          */
390         mz = page_cgroup_zoneinfo(pc);
391         mem = pc->mem_cgroup;
392         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
393         list_del_init(&pc->lru);
394         return;
395 }
396
397 void mem_cgroup_del_lru(struct page *page)
398 {
399         mem_cgroup_del_lru_list(page, page_lru(page));
400 }
401
402 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
403 {
404         struct mem_cgroup_per_zone *mz;
405         struct page_cgroup *pc;
406
407         if (mem_cgroup_disabled())
408                 return;
409
410         pc = lookup_page_cgroup(page);
411         /*
412          * Used bit is set without atomic ops but after smp_wmb().
413          * For making pc->mem_cgroup visible, insert smp_rmb() here.
414          */
415         smp_rmb();
416         /* unused page is not rotated. */
417         if (!PageCgroupUsed(pc))
418                 return;
419         mz = page_cgroup_zoneinfo(pc);
420         list_move(&pc->lru, &mz->lists[lru]);
421 }
422
423 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
424 {
425         struct page_cgroup *pc;
426         struct mem_cgroup_per_zone *mz;
427
428         if (mem_cgroup_disabled())
429                 return;
430         pc = lookup_page_cgroup(page);
431         /*
432          * Used bit is set without atomic ops but after smp_wmb().
433          * For making pc->mem_cgroup visible, insert smp_rmb() here.
434          */
435         smp_rmb();
436         if (!PageCgroupUsed(pc))
437                 return;
438
439         mz = page_cgroup_zoneinfo(pc);
440         MEM_CGROUP_ZSTAT(mz, lru) += 1;
441         list_add(&pc->lru, &mz->lists[lru]);
442 }
443
444 /*
445  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
446  * the LRU because the page may be reused after it's fully uncharged (because of
447  * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when
448  * we charge it again. This function is only used to charge SwapCache. It's done
449  * under lock_page and it is expected that zone->lru_lock is never held.
450  */
451 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
452 {
453         unsigned long flags;
454         struct zone *zone = page_zone(page);
455         struct page_cgroup *pc = lookup_page_cgroup(page);
456
457         spin_lock_irqsave(&zone->lru_lock, flags);
458         /*
459          * Forget old LRU when this page_cgroup is *not* used. This Used bit
460          * is guarded by lock_page() because the page is SwapCache.
461          */
462         if (!PageCgroupUsed(pc))
463                 mem_cgroup_del_lru_list(page, page_lru(page));
464         spin_unlock_irqrestore(&zone->lru_lock, flags);
465 }
466
467 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
468 {
469         unsigned long flags;
470         struct zone *zone = page_zone(page);
471         struct page_cgroup *pc = lookup_page_cgroup(page);
472
473         spin_lock_irqsave(&zone->lru_lock, flags);
474         /* link when the page is linked to LRU but page_cgroup isn't */
475         if (PageLRU(page) && list_empty(&pc->lru))
476                 mem_cgroup_add_lru_list(page, page_lru(page));
477         spin_unlock_irqrestore(&zone->lru_lock, flags);
478 }
479
480
481 void mem_cgroup_move_lists(struct page *page,
482                            enum lru_list from, enum lru_list to)
483 {
484         if (mem_cgroup_disabled())
485                 return;
486         mem_cgroup_del_lru_list(page, from);
487         mem_cgroup_add_lru_list(page, to);
488 }
489
490 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
491 {
492         int ret;
493         struct mem_cgroup *curr = NULL;
494
495         task_lock(task);
496         rcu_read_lock();
497         curr = try_get_mem_cgroup_from_mm(task->mm);
498         rcu_read_unlock();
499         task_unlock(task);
500         if (!curr)
501                 return 0;
502         if (curr->use_hierarchy)
503                 ret = css_is_ancestor(&curr->css, &mem->css);
504         else
505                 ret = (curr == mem);
506         css_put(&curr->css);
507         return ret;
508 }
509
510 /*
511  * prev_priority control... this will be used in the memory reclaim path.
512  */
513 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
514 {
515         int prev_priority;
516
517         spin_lock(&mem->reclaim_param_lock);
518         prev_priority = mem->prev_priority;
519         spin_unlock(&mem->reclaim_param_lock);
520
521         return prev_priority;
522 }
523
524 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
525 {
526         spin_lock(&mem->reclaim_param_lock);
527         if (priority < mem->prev_priority)
528                 mem->prev_priority = priority;
529         spin_unlock(&mem->reclaim_param_lock);
530 }
531
532 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
533 {
534         spin_lock(&mem->reclaim_param_lock);
535         mem->prev_priority = priority;
536         spin_unlock(&mem->reclaim_param_lock);
537 }
538
539 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
540 {
541         unsigned long active;
542         unsigned long inactive;
543         unsigned long gb;
544         unsigned long inactive_ratio;
545
546         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
547         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
548
549         gb = (inactive + active) >> (30 - PAGE_SHIFT);
550         if (gb)
551                 inactive_ratio = int_sqrt(10 * gb);
552         else
553                 inactive_ratio = 1;
554
555         if (present_pages) {
556                 present_pages[0] = inactive;
557                 present_pages[1] = active;
558         }
559
560         return inactive_ratio;
561 }
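/*
 * Worked example: with 4GB of anon pages (inactive + active), gb = 4 and
 * inactive_ratio = int_sqrt(10 * 4) = 6.  mem_cgroup_inactive_anon_is_low()
 * below then reports the inactive anon list as low once
 * inactive * 6 < active, i.e. once less than roughly 1/7 of anon memory sits
 * on the inactive list.
 */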
562
563 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
564 {
565         unsigned long active;
566         unsigned long inactive;
567         unsigned long present_pages[2];
568         unsigned long inactive_ratio;
569
570         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
571
572         inactive = present_pages[0];
573         active = present_pages[1];
574
575         if (inactive * inactive_ratio < active)
576                 return 1;
577
578         return 0;
579 }
580
581 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
582                                        struct zone *zone,
583                                        enum lru_list lru)
584 {
585         int nid = zone->zone_pgdat->node_id;
586         int zid = zone_idx(zone);
587         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
588
589         return MEM_CGROUP_ZSTAT(mz, lru);
590 }
591
592 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
593                                                       struct zone *zone)
594 {
595         int nid = zone->zone_pgdat->node_id;
596         int zid = zone_idx(zone);
597         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
598
599         return &mz->reclaim_stat;
600 }
601
602 struct zone_reclaim_stat *
603 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
604 {
605         struct page_cgroup *pc;
606         struct mem_cgroup_per_zone *mz;
607
608         if (mem_cgroup_disabled())
609                 return NULL;
610
611         pc = lookup_page_cgroup(page);
612         /*
613          * Used bit is set without atomic ops but after smp_wmb().
614          * For making pc->mem_cgroup visible, insert smp_rmb() here.
615          */
616         smp_rmb();
617         if (!PageCgroupUsed(pc))
618                 return NULL;
619
620         mz = page_cgroup_zoneinfo(pc);
621         if (!mz)
622                 return NULL;
623
624         return &mz->reclaim_stat;
625 }
626
627 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
628                                         struct list_head *dst,
629                                         unsigned long *scanned, int order,
630                                         int mode, struct zone *z,
631                                         struct mem_cgroup *mem_cont,
632                                         int active, int file)
633 {
634         unsigned long nr_taken = 0;
635         struct page *page;
636         unsigned long scan;
637         LIST_HEAD(pc_list);
638         struct list_head *src;
639         struct page_cgroup *pc, *tmp;
640         int nid = z->zone_pgdat->node_id;
641         int zid = zone_idx(z);
642         struct mem_cgroup_per_zone *mz;
643         int lru = LRU_FILE * !!file + !!active;
644
645         BUG_ON(!mem_cont);
646         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
647         src = &mz->lists[lru];
648
649         scan = 0;
650         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
651                 if (scan >= nr_to_scan)
652                         break;
653
654                 page = pc->page;
655                 if (unlikely(!PageCgroupUsed(pc)))
656                         continue;
657                 if (unlikely(!PageLRU(page)))
658                         continue;
659
660                 scan++;
661                 if (__isolate_lru_page(page, mode, file) == 0) {
662                         list_move(&page->lru, dst);
663                         nr_taken++;
664                 }
665         }
666
667         *scanned = scan;
668         return nr_taken;
669 }
670
671 #define mem_cgroup_from_res_counter(counter, member)    \
672         container_of(counter, struct mem_cgroup, member)
673
674 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
675 {
676         if (do_swap_account) {
677                 if (res_counter_check_under_limit(&mem->res) &&
678                         res_counter_check_under_limit(&mem->memsw))
679                         return true;
680         } else
681                 if (res_counter_check_under_limit(&mem->res))
682                         return true;
683         return false;
684 }
685
686 static unsigned int get_swappiness(struct mem_cgroup *memcg)
687 {
688         struct cgroup *cgrp = memcg->css.cgroup;
689         unsigned int swappiness;
690
691         /* root ? */
692         if (cgrp->parent == NULL)
693                 return vm_swappiness;
694
695         spin_lock(&memcg->reclaim_param_lock);
696         swappiness = memcg->swappiness;
697         spin_unlock(&memcg->reclaim_param_lock);
698
699         return swappiness;
700 }
701
702 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
703 {
704         int *val = data;
705         (*val)++;
706         return 0;
707 }
708
709 /**
710  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
711  * @memcg: The memory cgroup that went over limit
712  * @p: Task that is going to be killed
713  *
714  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
715  * enabled
716  */
717 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
718 {
719         struct cgroup *task_cgrp;
720         struct cgroup *mem_cgrp;
721         /*
722          * Need a buffer in BSS, can't rely on allocations. The code relies
723          * on the assumption that OOM is serialized for memory controller.
724          * If this assumption is broken, revisit this code.
725          */
726         static char memcg_name[PATH_MAX];
727         int ret;
728
729         if (!memcg)
730                 return;
731
732
733         rcu_read_lock();
734
735         mem_cgrp = memcg->css.cgroup;
736         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
737
738         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
739         if (ret < 0) {
740                 /*
741                  * Unfortunately, we are unable to convert to a useful name,
742                  * but we'll still print out the usage information.
743                  */
744                 rcu_read_unlock();
745                 goto done;
746         }
747         rcu_read_unlock();
748
749         printk(KERN_INFO "Task in %s killed", memcg_name);
750
751         rcu_read_lock();
752         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
753         if (ret < 0) {
754                 rcu_read_unlock();
755                 goto done;
756         }
757         rcu_read_unlock();
758
759         /*
760          * Continues from above, so we don't need a KERN_ level
761          */
762         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
763 done:
764
765         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
766                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
767                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
768                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
769         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
770                 "failcnt %llu\n",
771                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
772                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
773                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
774 }
775
776 /*
777  * This function returns the number of memcgs under the hierarchy tree. Returns
778  * 1 (self count) if there are no children.
779  */
780 static int mem_cgroup_count_children(struct mem_cgroup *mem)
781 {
782         int num = 0;
783         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
784         return num;
785 }
786
787 /*
788  * Visit the first child (need not be the first child as per the ordering
789  * of the cgroup list, since we track last_scanned_child) of @mem and use
790  * that to reclaim free pages from.
791  */
792 static struct mem_cgroup *
793 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
794 {
795         struct mem_cgroup *ret = NULL;
796         struct cgroup_subsys_state *css;
797         int nextid, found;
798
799         if (!root_mem->use_hierarchy) {
800                 css_get(&root_mem->css);
801                 ret = root_mem;
802         }
803
804         while (!ret) {
805                 rcu_read_lock();
806                 nextid = root_mem->last_scanned_child + 1;
807                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
808                                    &found);
809                 if (css && css_tryget(css))
810                         ret = container_of(css, struct mem_cgroup, css);
811
812                 rcu_read_unlock();
813                 /* Updates scanning parameter */
814                 spin_lock(&root_mem->reclaim_param_lock);
815                 if (!css) {
816                         /* this means start scan from ID:1 */
817                         root_mem->last_scanned_child = 0;
818                 } else
819                         root_mem->last_scanned_child = found;
820                 spin_unlock(&root_mem->reclaim_param_lock);
821         }
822
823         return ret;
824 }
825
826 /*
827  * Scan the hierarchy if needed to reclaim memory. We remember the last child
828  * we reclaimed from, so that we don't end up penalizing one child extensively
829  * based on its position in the children list.
830  *
831  * root_mem is the original ancestor that we've been reclaiming from.
832  *
833  * We give up and return to the caller when we visit root_mem twice.
834  * (other groups can be removed while we're walking....)
835  *
836  * If shrink==true, to avoid freeing too much, this returns immediately.
837  */
838 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
839                                    gfp_t gfp_mask, bool noswap, bool shrink)
840 {
841         struct mem_cgroup *victim;
842         int ret, total = 0;
843         int loop = 0;
844
845         while (loop < 2) {
846                 victim = mem_cgroup_select_victim(root_mem);
847                 if (victim == root_mem)
848                         loop++;
849                 if (!mem_cgroup_local_usage(&victim->stat)) {
850                         /* this cgroup's local usage == 0 */
851                         css_put(&victim->css);
852                         continue;
853                 }
854                 /* we use swappiness of local cgroup */
855                 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
856                                                    get_swappiness(victim));
857                 css_put(&victim->css);
858                 /*
859                  * When shrinking usage, we can't check whether we should stop here
860                  * or reclaim more. That depends on the caller. last_scanned_child
861                  * will work well enough for keeping fairness under the tree.
862                  */
863                 if (shrink)
864                         return ret;
865                 total += ret;
866                 if (mem_cgroup_check_under_limit(root_mem))
867                         return 1 + total;
868         }
869         return total;
870 }
871
872 bool mem_cgroup_oom_called(struct task_struct *task)
873 {
874         bool ret = false;
875         struct mem_cgroup *mem;
876         struct mm_struct *mm;
877
878         rcu_read_lock();
879         mm = task->mm;
880         if (!mm)
881                 mm = &init_mm;
882         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
883         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
884                 ret = true;
885         rcu_read_unlock();
886         return ret;
887 }
888
889 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
890 {
891         mem->last_oom_jiffies = jiffies;
892         return 0;
893 }
894
895 static void record_last_oom(struct mem_cgroup *mem)
896 {
897         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
898 }
899
900
901 /*
902  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
903  * the oom-killer can be invoked.
904  */
905 static int __mem_cgroup_try_charge(struct mm_struct *mm,
906                         gfp_t gfp_mask, struct mem_cgroup **memcg,
907                         bool oom)
908 {
909         struct mem_cgroup *mem, *mem_over_limit;
910         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
911         struct res_counter *fail_res;
912
913         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
914                 /* Don't account this! */
915                 *memcg = NULL;
916                 return 0;
917         }
918
919         /*
920          * We always charge the cgroup the mm_struct belongs to.
921          * The mm_struct's mem_cgroup changes on task migration if the
922          * thread group leader migrates. It's possible that mm is not
923          * set, if so charge the init_mm (happens for pagecache usage).
924          */
925         mem = *memcg;
926         if (likely(!mem)) {
927                 mem = try_get_mem_cgroup_from_mm(mm);
928                 *memcg = mem;
929         } else {
930                 css_get(&mem->css);
931         }
932         if (unlikely(!mem))
933                 return 0;
934
935         VM_BUG_ON(mem_cgroup_is_obsolete(mem));
936
937         while (1) {
938                 int ret;
939                 bool noswap = false;
940
941                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
942                 if (likely(!ret)) {
943                         if (!do_swap_account)
944                                 break;
945                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
946                                                         &fail_res);
947                         if (likely(!ret))
948                                 break;
949                         /* mem+swap counter fails */
950                         res_counter_uncharge(&mem->res, PAGE_SIZE);
951                         noswap = true;
952                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
953                                                                         memsw);
954                 } else
955                         /* mem counter fails */
956                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
957                                                                         res);
958
959                 if (!(gfp_mask & __GFP_WAIT))
960                         goto nomem;
961
962                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
963                                                         noswap, false);
964                 if (ret)
965                         continue;
966
967                 /*
968                  * try_to_free_mem_cgroup_pages() might not give us a full
969                  * picture of reclaim. Some pages are reclaimed and might be
970                  * moved to swap cache or just unmapped from the cgroup.
971                  * Check the limit again to see if the reclaim reduced the
972                  * current usage of the cgroup before giving up
973                  *
974                  */
975                 if (mem_cgroup_check_under_limit(mem_over_limit))
976                         continue;
977
978                 if (!nr_retries--) {
979                         if (oom) {
980                                 mutex_lock(&memcg_tasklist);
981                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
982                                 mutex_unlock(&memcg_tasklist);
983                                 record_last_oom(mem_over_limit);
984                         }
985                         goto nomem;
986                 }
987         }
988         return 0;
989 nomem:
990         css_put(&mem->css);
991         return -ENOMEM;
992 }
993
994
995 /*
996  * A helper function to get a mem_cgroup from its ID. Must be called under
997  * rcu_read_lock(). The caller must check css_is_removed() or something
998  * similar if that is a concern. (Dropping a refcnt from swap can be called
999  * against a removed memcg.)
1000  */
1001 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1002 {
1003         struct cgroup_subsys_state *css;
1004
1005         /* ID 0 is unused ID */
1006         if (!id)
1007                 return NULL;
1008         css = css_lookup(&mem_cgroup_subsys, id);
1009         if (!css)
1010                 return NULL;
1011         return container_of(css, struct mem_cgroup, css);
1012 }
1013
1014 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1015 {
1016         struct mem_cgroup *mem;
1017         struct page_cgroup *pc;
1018         unsigned short id;
1019         swp_entry_t ent;
1020
1021         VM_BUG_ON(!PageLocked(page));
1022
1023         if (!PageSwapCache(page))
1024                 return NULL;
1025
1026         pc = lookup_page_cgroup(page);
1027         /*
1028          * Used bit of swapcache is solid under page lock.
1029          */
1030         if (PageCgroupUsed(pc)) {
1031                 mem = pc->mem_cgroup;
1032                 if (mem && !css_tryget(&mem->css))
1033                         mem = NULL;
1034         } else {
1035                 ent.val = page_private(page);
1036                 id = lookup_swap_cgroup(ent);
1037                 rcu_read_lock();
1038                 mem = mem_cgroup_lookup(id);
1039                 if (mem && !css_tryget(&mem->css))
1040                         mem = NULL;
1041                 rcu_read_unlock();
1042         }
1043         return mem;
1044 }
1045
1046 /*
1047  * Commit a charge obtained by __mem_cgroup_try_charge() and mark the page_cgroup
1048  * as being in the USED state. If it is already USED, uncharge and return.
1049  */
1050
1051 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1052                                      struct page_cgroup *pc,
1053                                      enum charge_type ctype)
1054 {
1055         /* try_charge() can return with *memcg == NULL; handle that case here. */
1056         if (!mem)
1057                 return;
1058
1059         lock_page_cgroup(pc);
1060         if (unlikely(PageCgroupUsed(pc))) {
1061                 unlock_page_cgroup(pc);
1062                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1063                 if (do_swap_account)
1064                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1065                 css_put(&mem->css);
1066                 return;
1067         }
1068         pc->mem_cgroup = mem;
1069         smp_wmb();
1070         pc->flags = pcg_default_flags[ctype];
1071
1072         mem_cgroup_charge_statistics(mem, pc, true);
1073
1074         unlock_page_cgroup(pc);
1075 }
1076
1077 /**
1078  * mem_cgroup_move_account - move account of the page
1079  * @pc: page_cgroup of the page.
1080  * @from: mem_cgroup which the page is moved from.
1081  * @to: mem_cgroup which the page is moved to. @from != @to.
1082  *
1083  * The caller must confirm following.
1084  * - page is not on LRU (isolate_page() is useful.)
1085  *
1086  * returns 0 at success,
1087  * returns -EBUSY when lock is busy or "pc" is unstable.
1088  *
1089  * This function does "uncharge" from old cgroup but doesn't do "charge" to
1090  * new cgroup. It should be done by a caller.
1091  */
1092
1093 static int mem_cgroup_move_account(struct page_cgroup *pc,
1094         struct mem_cgroup *from, struct mem_cgroup *to)
1095 {
1096         struct mem_cgroup_per_zone *from_mz, *to_mz;
1097         int nid, zid;
1098         int ret = -EBUSY;
1099
1100         VM_BUG_ON(from == to);
1101         VM_BUG_ON(PageLRU(pc->page));
1102
1103         nid = page_cgroup_nid(pc);
1104         zid = page_cgroup_zid(pc);
1105         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1106         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1107
1108         if (!trylock_page_cgroup(pc))
1109                 return ret;
1110
1111         if (!PageCgroupUsed(pc))
1112                 goto out;
1113
1114         if (pc->mem_cgroup != from)
1115                 goto out;
1116
1117         res_counter_uncharge(&from->res, PAGE_SIZE);
1118         mem_cgroup_charge_statistics(from, pc, false);
1119         if (do_swap_account)
1120                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1121         css_put(&from->css);
1122
1123         css_get(&to->css);
1124         pc->mem_cgroup = to;
1125         mem_cgroup_charge_statistics(to, pc, true);
1126         ret = 0;
1127 out:
1128         unlock_page_cgroup(pc);
1129         return ret;
1130 }
1131
1132 /*
1133  * move charges to its parent.
1134  */
1135
1136 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1137                                   struct mem_cgroup *child,
1138                                   gfp_t gfp_mask)
1139 {
1140         struct page *page = pc->page;
1141         struct cgroup *cg = child->css.cgroup;
1142         struct cgroup *pcg = cg->parent;
1143         struct mem_cgroup *parent;
1144         int ret;
1145
1146         /* Is ROOT ? */
1147         if (!pcg)
1148                 return -EINVAL;
1149
1150
1151         parent = mem_cgroup_from_cont(pcg);
1152
1153
1154         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1155         if (ret || !parent)
1156                 return ret;
1157
1158         if (!get_page_unless_zero(page)) {
1159                 ret = -EBUSY;
1160                 goto uncharge;
1161         }
1162
1163         ret = isolate_lru_page(page);
1164
1165         if (ret)
1166                 goto cancel;
1167
1168         ret = mem_cgroup_move_account(pc, child, parent);
1169
1170         putback_lru_page(page);
1171         if (!ret) {
1172                 put_page(page);
1173                 /* drop extra refcnt by try_charge() */
1174                 css_put(&parent->css);
1175                 return 0;
1176         }
1177
1178 cancel:
1179         put_page(page);
1180 uncharge:
1181         /* drop extra refcnt by try_charge() */
1182         css_put(&parent->css);
1183         /* uncharge if move fails */
1184         res_counter_uncharge(&parent->res, PAGE_SIZE);
1185         if (do_swap_account)
1186                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1187         return ret;
1188 }
1189
1190 /*
1191  * Charge the memory controller for page usage.
1192  * Return
1193  * 0 if the charge was successful
1194  * < 0 if the cgroup is over its limit
1195  */
1196 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1197                                 gfp_t gfp_mask, enum charge_type ctype,
1198                                 struct mem_cgroup *memcg)
1199 {
1200         struct mem_cgroup *mem;
1201         struct page_cgroup *pc;
1202         int ret;
1203
1204         pc = lookup_page_cgroup(page);
1205         /* can happen at boot */
1206         if (unlikely(!pc))
1207                 return 0;
1208         prefetchw(pc);
1209
1210         mem = memcg;
1211         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1212         if (ret || !mem)
1213                 return ret;
1214
1215         __mem_cgroup_commit_charge(mem, pc, ctype);
1216         return 0;
1217 }
1218
1219 int mem_cgroup_newpage_charge(struct page *page,
1220                               struct mm_struct *mm, gfp_t gfp_mask)
1221 {
1222         if (mem_cgroup_disabled())
1223                 return 0;
1224         if (PageCompound(page))
1225                 return 0;
1226         /*
1227          * If already mapped, we don't have to account.
1228          * If page cache, page->mapping has address_space.
1229          * But page->mapping may contain an out-of-use anon_vma pointer;
1230          * detect that with the PageAnon() check. A newly-mapped anonymous
1231          * page's page->mapping is NULL.
1232          */
1233         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1234                 return 0;
1235         if (unlikely(!mm))
1236                 mm = &init_mm;
1237         return mem_cgroup_charge_common(page, mm, gfp_mask,
1238                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1239 }
1240
1241 static void
1242 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1243                                         enum charge_type ctype);
1244
1245 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1246                                 gfp_t gfp_mask)
1247 {
1248         struct mem_cgroup *mem = NULL;
1249         int ret;
1250
1251         if (mem_cgroup_disabled())
1252                 return 0;
1253         if (PageCompound(page))
1254                 return 0;
1255         /*
1256          * Corner case handling. This is usually called from add_to_page_cache().
1257          * But some filesystems (shmem) precharge this page before calling it
1258          * and call add_to_page_cache() with GFP_NOWAIT.
1259          *
1260          * In the GFP_NOWAIT case, the page may be pre-charged before calling
1261          * add_to_page_cache(). (See shmem.c.) Check for that here and avoid
1262          * charging twice. (It works but has to pay a slightly larger cost.)
1263          * And when the page is SwapCache, it should take swap information
1264          * into account. This is under lock_page() now.
1265          */
1266         if (!(gfp_mask & __GFP_WAIT)) {
1267                 struct page_cgroup *pc;
1268
1269
1270                 pc = lookup_page_cgroup(page);
1271                 if (!pc)
1272                         return 0;
1273                 lock_page_cgroup(pc);
1274                 if (PageCgroupUsed(pc)) {
1275                         unlock_page_cgroup(pc);
1276                         return 0;
1277                 }
1278                 unlock_page_cgroup(pc);
1279         }
1280
1281         if (unlikely(!mm && !mem))
1282                 mm = &init_mm;
1283
1284         if (page_is_file_cache(page))
1285                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1286                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1287
1288         /* shmem */
1289         if (PageSwapCache(page)) {
1290                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1291                 if (!ret)
1292                         __mem_cgroup_commit_charge_swapin(page, mem,
1293                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
1294         } else
1295                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1296                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1297
1298         return ret;
1299 }
1300
1301 /*
1302  * During swap-in (try_charge -> commit or cancel), the page is locked.
1303  * And when try_charge() successfully returns, one refcnt to the memcg (without
1304  * a struct page_cgroup) is acquired. This refcnt will be consumed by
1305  * "commit()" or removed by "cancel()".
1306  */
1307 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1308                                  struct page *page,
1309                                  gfp_t mask, struct mem_cgroup **ptr)
1310 {
1311         struct mem_cgroup *mem;
1312         int ret;
1313
1314         if (mem_cgroup_disabled())
1315                 return 0;
1316
1317         if (!do_swap_account)
1318                 goto charge_cur_mm;
1319         /*
1320          * A racing thread's fault, or swapoff, may have already updated
1321          * the pte, and even removed page from swap cache: return success
1322          * to go on to do_swap_page()'s pte_same() test, which should fail.
1323          */
1324         if (!PageSwapCache(page))
1325                 return 0;
1326         mem = try_get_mem_cgroup_from_swapcache(page);
1327         if (!mem)
1328                 goto charge_cur_mm;
1329         *ptr = mem;
1330         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1331         /* drop extra refcnt from tryget */
1332         css_put(&mem->css);
1333         return ret;
1334 charge_cur_mm:
1335         if (unlikely(!mm))
1336                 mm = &init_mm;
1337         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1338 }
1339
1340 static void
1341 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1342                                         enum charge_type ctype)
1343 {
1344         struct page_cgroup *pc;
1345
1346         if (mem_cgroup_disabled())
1347                 return;
1348         if (!ptr)
1349                 return;
1350         pc = lookup_page_cgroup(page);
1351         mem_cgroup_lru_del_before_commit_swapcache(page);
1352         __mem_cgroup_commit_charge(ptr, pc, ctype);
1353         mem_cgroup_lru_add_after_commit_swapcache(page);
1354         /*
1355          * Now swap is on-memory. This means this page may be
1356          * counted both as mem and swap, i.e. double counted.
1357          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1358          * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
1359          * may call delete_from_swap_cache() before we reach here.
1360          */
1361         if (do_swap_account && PageSwapCache(page)) {
1362                 swp_entry_t ent = {.val = page_private(page)};
1363                 unsigned short id;
1364                 struct mem_cgroup *memcg;
1365
1366                 id = swap_cgroup_record(ent, 0);
1367                 rcu_read_lock();
1368                 memcg = mem_cgroup_lookup(id);
1369                 if (memcg) {
1370                         /*
1371                          * This recorded memcg can be an obsolete one. So, avoid
1372                          * calling css_tryget
1373                          */
1374                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1375                         mem_cgroup_put(memcg);
1376                 }
1377                 rcu_read_unlock();
1378         }
1379         /* add this page(page_cgroup) to the LRU we want. */
1380
1381 }
1382
1383 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1384 {
1385         __mem_cgroup_commit_charge_swapin(page, ptr,
1386                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
1387 }
1388
1389 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1390 {
1391         if (mem_cgroup_disabled())
1392                 return;
1393         if (!mem)
1394                 return;
1395         res_counter_uncharge(&mem->res, PAGE_SIZE);
1396         if (do_swap_account)
1397                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1398         css_put(&mem->css);
1399 }
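/*
 * Caller-side sketch (simplified; modelled on the do_swap_page() path, where
 * "pte_still_matches" stands in for the usual pte_same() recheck):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto oom;
 *	...
 *	if (pte_still_matches)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 *
 * Exactly one of commit/cancel consumes the reference taken by
 * mem_cgroup_try_charge_swapin().
 */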
1400
1401
1402 /*
1403  * uncharge if !page_mapped(page)
1404  */
1405 static struct mem_cgroup *
1406 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1407 {
1408         struct page_cgroup *pc;
1409         struct mem_cgroup *mem = NULL;
1410         struct mem_cgroup_per_zone *mz;
1411
1412         if (mem_cgroup_disabled())
1413                 return NULL;
1414
1415         if (PageSwapCache(page))
1416                 return NULL;
1417
1418         /*
1419          * Check if our page_cgroup is valid
1420          */
1421         pc = lookup_page_cgroup(page);
1422         if (unlikely(!pc || !PageCgroupUsed(pc)))
1423                 return NULL;
1424
1425         lock_page_cgroup(pc);
1426
1427         mem = pc->mem_cgroup;
1428
1429         if (!PageCgroupUsed(pc))
1430                 goto unlock_out;
1431
1432         switch (ctype) {
1433         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1434                 if (page_mapped(page))
1435                         goto unlock_out;
1436                 break;
1437         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1438                 if (!PageAnon(page)) {  /* Shared memory */
1439                         if (page->mapping && !page_is_file_cache(page))
1440                                 goto unlock_out;
1441                 } else if (page_mapped(page)) /* Anon */
1442                                 goto unlock_out;
1443                 break;
1444         default:
1445                 break;
1446         }
1447
1448         res_counter_uncharge(&mem->res, PAGE_SIZE);
1449         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1450                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1451         mem_cgroup_charge_statistics(mem, pc, false);
1452
1453         ClearPageCgroupUsed(pc);
1454         /*
1455          * pc->mem_cgroup is not cleared here. It will be accessed when it's
1456          * freed from the LRU. This is safe because an uncharged page is expected
1457          * not to be reused (it is freed soon). The exception is SwapCache, which
1458          * is handled by special functions.
1459          */
1460
1461         mz = page_cgroup_zoneinfo(pc);
1462         unlock_page_cgroup(pc);
1463
1464         /* at swapout, this memcg will be accessed to record to swap */
1465         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1466                 css_put(&mem->css);
1467
1468         return mem;
1469
1470 unlock_out:
1471         unlock_page_cgroup(pc);
1472         return NULL;
1473 }
1474
1475 void mem_cgroup_uncharge_page(struct page *page)
1476 {
1477         /* early check. */
1478         if (page_mapped(page))
1479                 return;
1480         if (page->mapping && !PageAnon(page))
1481                 return;
1482         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1483 }
1484
1485 void mem_cgroup_uncharge_cache_page(struct page *page)
1486 {
1487         VM_BUG_ON(page_mapped(page));
1488         VM_BUG_ON(page->mapping);
1489         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1490 }
1491
1492 /*
1493  * Called from __delete_from_swap_cache(); drops the "page" account.
1494  * The memcg information is recorded in the swap_cgroup of "ent".
1495  */
1496 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1497 {
1498         struct mem_cgroup *memcg;
1499
1500         memcg = __mem_cgroup_uncharge_common(page,
1501                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1502         /* record memcg information */
1503         if (do_swap_account && memcg) {
1504                 swap_cgroup_record(ent, css_id(&memcg->css));
1505                 mem_cgroup_get(memcg);
1506         }
1507         if (memcg)
1508                 css_put(&memcg->css);
1509 }
1510
1511 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1512 /*
1513  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1514  * uncharges the "memsw" account.
1515  */
1516 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1517 {
1518         struct mem_cgroup *memcg;
1519         unsigned short id;
1520
1521         if (!do_swap_account)
1522                 return;
1523
1524         id = swap_cgroup_record(ent, 0);
1525         rcu_read_lock();
1526         memcg = mem_cgroup_lookup(id);
1527         if (memcg) {
1528                 /*
1529                  * We uncharge this because swap is freed.
1530                  * This memcg can be an obsolete one. We avoid calling css_tryget.
1531                  */
1532                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1533                 mem_cgroup_put(memcg);
1534         }
1535         rcu_read_unlock();
1536 }
1537 #endif
1538
1539 /*
1540  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1541  * page belongs to.
1542  */
1543 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1544 {
1545         struct page_cgroup *pc;
1546         struct mem_cgroup *mem = NULL;
1547         int ret = 0;
1548
1549         if (mem_cgroup_disabled())
1550                 return 0;
1551
1552         pc = lookup_page_cgroup(page);
1553         lock_page_cgroup(pc);
1554         if (PageCgroupUsed(pc)) {
1555                 mem = pc->mem_cgroup;
1556                 css_get(&mem->css);
1557         }
1558         unlock_page_cgroup(pc);
1559
1560         if (mem) {
1561                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1562                 css_put(&mem->css);
1563         }
1564         *ptr = mem;
1565         return ret;
1566 }
1567
1568 /* remove redundant charge if migration failed*/
1569 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1570                 struct page *oldpage, struct page *newpage)
1571 {
1572         struct page *target, *unused;
1573         struct page_cgroup *pc;
1574         enum charge_type ctype;
1575
1576         if (!mem)
1577                 return;
1578
1579         /* at migration success, oldpage->mapping is NULL. */
1580         if (oldpage->mapping) {
1581                 target = oldpage;
1582                 unused = NULL;
1583         } else {
1584                 target = newpage;
1585                 unused = oldpage;
1586         }
1587
1588         if (PageAnon(target))
1589                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1590         else if (page_is_file_cache(target))
1591                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1592         else
1593                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1594
1595         /* the unused page is no longer on the radix-tree. */
1596         if (unused)
1597                 __mem_cgroup_uncharge_common(unused, ctype);
1598
1599         pc = lookup_page_cgroup(target);
1600         /*
1601          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
1602          * page_cgroup, so double-counting is effectively avoided.
1603          */
1604         __mem_cgroup_commit_charge(mem, pc, ctype);
1605
1606         /*
1607          * Both oldpage and newpage are still under lock_page(), so we
1608          * don't have to worry about races in the radix-tree.
1609          * We do have to check whether the page is still mapped, though.
1610          *
1611          * !page_mapped() is possible: at the start of migration the
1612          * oldpage was mapped, but it may have been zapped since then.
1613          * We still know the *target* page is not freed/reused under us;
1614          * mem_cgroup_uncharge_page() does all the necessary checks.
1615          */
1616         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1617                 mem_cgroup_uncharge_page(target);
1618 }
1619
1620 /*
1621  * Try to shrink memory usage under the specified resource controller.
1622  * This is typically used to reclaim pages on behalf of shmem, reducing
1623  * the side effects of shmem page allocation on the mem_cgroups that use it.
1624  */
1625 int mem_cgroup_shrink_usage(struct page *page,
1626                             struct mm_struct *mm,
1627                             gfp_t gfp_mask)
1628 {
1629         struct mem_cgroup *mem = NULL;
1630         int progress = 0;
1631         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1632
1633         if (mem_cgroup_disabled())
1634                 return 0;
1635         if (page)
1636                 mem = try_get_mem_cgroup_from_swapcache(page);
1637         if (!mem && mm)
1638                 mem = try_get_mem_cgroup_from_mm(mm);
1639         if (unlikely(!mem))
1640                 return 0;
1641
1642         do {
1643                 progress = mem_cgroup_hierarchical_reclaim(mem,
1644                                         gfp_mask, true, false);
1645                 progress += mem_cgroup_check_under_limit(mem);
1646         } while (!progress && --retry);
1647
1648         css_put(&mem->css);
1649         if (!retry)
1650                 return -ENOMEM;
1651         return 0;
1652 }
1653
1654 static DEFINE_MUTEX(set_limit_mutex);
1655
1656 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1657                                 unsigned long long val)
1658 {
1659         int retry_count;
1660         int progress;
1661         u64 memswlimit;
1662         int ret = 0;
1663         int children = mem_cgroup_count_children(memcg);
1664         u64 curusage, oldusage;
1665
1666         /*
1667          * To keep hierarchical_reclaim simple, how long we should retry
1668          * depends on the caller. We set our retry count to be a function
1669          * of the number of children we have to visit in this loop.
1670          */
1671         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1672
1673         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1674
1675         while (retry_count) {
1676                 if (signal_pending(current)) {
1677                         ret = -EINTR;
1678                         break;
1679                 }
1680                 /*
1681                  * Rather than hide all of this in some helper, do it in an
1682                  * open-coded manner so it is obvious what really happens.
1683                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1684                  */
1685                 mutex_lock(&set_limit_mutex);
1686                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1687                 if (memswlimit < val) {
1688                         ret = -EINVAL;
1689                         mutex_unlock(&set_limit_mutex);
1690                         break;
1691                 }
1692                 ret = res_counter_set_limit(&memcg->res, val);
1693                 mutex_unlock(&set_limit_mutex);
1694
1695                 if (!ret)
1696                         break;
1697
1698                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1699                                                    false, true);
1700                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1701                 /* Was usage reduced? */
1702                 if (curusage >= oldusage)
1703                         retry_count--;
1704                 else
1705                         oldusage = curusage;
1706         }
1707
1708         return ret;
1709 }
1710
1711 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1712                                 unsigned long long val)
1713 {
1714         int retry_count;
1715         u64 memlimit, oldusage, curusage;
1716         int children = mem_cgroup_count_children(memcg);
1717         int ret = -EBUSY;
1718
1719         if (!do_swap_account)
1720                 return -EINVAL;
1721         /* see mem_cgroup_resize_limit() */
1722         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1723         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1724         while (retry_count) {
1725                 if (signal_pending(current)) {
1726                         ret = -EINTR;
1727                         break;
1728                 }
1729                 /*
1730                  * Rather than hide all of this in some helper, do it in an
1731                  * open-coded manner so it is obvious what really happens.
1732                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1733                  */
1734                 mutex_lock(&set_limit_mutex);
1735                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1736                 if (memlimit > val) {
1737                         ret = -EINVAL;
1738                         mutex_unlock(&set_limit_mutex);
1739                         break;
1740                 }
1741                 ret = res_counter_set_limit(&memcg->memsw, val);
1742                 mutex_unlock(&set_limit_mutex);
1743
1744                 if (!ret)
1745                         break;
1746
1747                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1748                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1749                 /* Was usage reduced? */
1750                 if (curusage >= oldusage)
1751                         retry_count--;
1752                 else
1753                         oldusage = curusage;
1754         }
1755         return ret;
1756 }
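/*
 * Practical consequence of the two limit checks above (a note, not new
 * logic): memory.limit_in_bytes can never be raised above
 * memory.memsw.limit_in_bytes. So to raise both limits, raise memsw first;
 * to lower both, lower the plain memory limit first.
 */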
1757
1758 /*
1759  * This routine traverses the page_cgroups on the given list and drops them
1760  * all. It does not reclaim the pages themselves; it only removes page_cgroups.
1761  */
1762 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1763                                 int node, int zid, enum lru_list lru)
1764 {
1765         struct zone *zone;
1766         struct mem_cgroup_per_zone *mz;
1767         struct page_cgroup *pc, *busy;
1768         unsigned long flags, loop;
1769         struct list_head *list;
1770         int ret = 0;
1771
1772         zone = &NODE_DATA(node)->node_zones[zid];
1773         mz = mem_cgroup_zoneinfo(mem, node, zid);
1774         list = &mz->lists[lru];
1775
1776         loop = MEM_CGROUP_ZSTAT(mz, lru);
1777         /* give some margin against -EBUSY etc. */
1778         loop += 256;
1779         busy = NULL;
1780         while (loop--) {
1781                 ret = 0;
1782                 spin_lock_irqsave(&zone->lru_lock, flags);
1783                 if (list_empty(list)) {
1784                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1785                         break;
1786                 }
1787                 pc = list_entry(list->prev, struct page_cgroup, lru);
1788                 if (busy == pc) {
1789                         list_move(&pc->lru, list);
1790                         busy = NULL;
1791                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1792                         continue;
1793                 }
1794                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1795
1796                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1797                 if (ret == -ENOMEM)
1798                         break;
1799
1800                 if (ret == -EBUSY || ret == -EINVAL) {
1801                         /* found lock contention or "pc" is obsolete. */
1802                         busy = pc;
1803                         cond_resched();
1804                 } else
1805                         busy = NULL;
1806         }
1807
1808         if (!ret && !list_empty(list))
1809                 return -EBUSY;
1810         return ret;
1811 }
1812
1813 /*
1814  * Make the mem_cgroup's charge 0 if it contains no tasks.
1815  * This makes it possible to delete the mem_cgroup.
1816  */
1817 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1818 {
1819         int ret;
1820         int node, zid, shrink;
1821         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1822         struct cgroup *cgrp = mem->css.cgroup;
1823
1824         css_get(&mem->css);
1825
1826         shrink = 0;
1827         /* should free all ? */
1828         if (free_all)
1829                 goto try_to_free;
1830 move_account:
1831         while (mem->res.usage > 0) {
1832                 ret = -EBUSY;
1833                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1834                         goto out;
1835                 ret = -EINTR;
1836                 if (signal_pending(current))
1837                         goto out;
1838                 /* This is for making sure all *used* pages are on the LRU. */
1839                 lru_add_drain_all();
1840                 ret = 0;
1841                 for_each_node_state(node, N_HIGH_MEMORY) {
1842                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1843                                 enum lru_list l;
1844                                 for_each_lru(l) {
1845                                         ret = mem_cgroup_force_empty_list(mem,
1846                                                         node, zid, l);
1847                                         if (ret)
1848                                                 break;
1849                                 }
1850                         }
1851                         if (ret)
1852                                 break;
1853                 }
1854                 /* it seems the parent cgroup doesn't have enough memory */
1855                 if (ret == -ENOMEM)
1856                         goto try_to_free;
1857                 cond_resched();
1858         }
1859         ret = 0;
1860 out:
1861         css_put(&mem->css);
1862         return ret;
1863
1864 try_to_free:
1865         /* returns -EBUSY if there is a task or if we come here twice. */
1866         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1867                 ret = -EBUSY;
1868                 goto out;
1869         }
1870         /* call try_to_free_mem_cgroup_pages() to make this cgroup empty */
1871         lru_add_drain_all();
1872         /* try to free all pages in this cgroup */
1873         shrink = 1;
1874         while (nr_retries && mem->res.usage > 0) {
1875                 int progress;
1876
1877                 if (signal_pending(current)) {
1878                         ret = -EINTR;
1879                         goto out;
1880                 }
1881                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1882                                                 false, get_swappiness(mem));
1883                 if (!progress) {
1884                         nr_retries--;
1885                         /* maybe some writeback is necessary */
1886                         congestion_wait(WRITE, HZ/10);
1887                 }
1888
1889         }
1890         lru_add_drain();
1891         /* try move_account again; there may be some *locked* pages. */
1892         if (mem->res.usage)
1893                 goto move_account;
1894         ret = 0;
1895         goto out;
1896 }
1897
1898 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1899 {
1900         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1901 }
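/*
 * Userspace trigger (illustrative sketch; the cgroup mount point below is
 * just an example): writing any value to the "memory.force_empty" control
 * file ends up here, e.g.
 *
 *	# echo 0 > /cgroups/my_group/memory.force_empty
 *
 * which tries to drop every charge in the group, either by reclaiming the
 * pages or by moving the charges to the parent, so the group can be removed.
 */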
1902
1903
1904 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1905 {
1906         return mem_cgroup_from_cont(cont)->use_hierarchy;
1907 }
1908
1909 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1910                                         u64 val)
1911 {
1912         int retval = 0;
1913         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1914         struct cgroup *parent = cont->parent;
1915         struct mem_cgroup *parent_mem = NULL;
1916
1917         if (parent)
1918                 parent_mem = mem_cgroup_from_cont(parent);
1919
1920         cgroup_lock();
1921         /*
1922          * If the parent's use_hierarchy is set, we can't make any
1923          * modifications in the child subtrees. If it is unset, then the
1924          * change can occur, provided the current cgroup has no children.
1925          *
1926          * For the root cgroup, parent_mem is NULL; we allow the value to be
1927          * set if there are no children.
1928          */
1929         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1930                                 (val == 1 || val == 0)) {
1931                 if (list_empty(&cont->children))
1932                         mem->use_hierarchy = val;
1933                 else
1934                         retval = -EBUSY;
1935         } else
1936                 retval = -EINVAL;
1937         cgroup_unlock();
1938
1939         return retval;
1940 }
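/*
 * Usage sketch (illustrative; paths are examples only): hierarchical
 * accounting is enabled per group through the "memory.use_hierarchy" file,
 * and only while the group has no children yet, e.g.
 *
 *	# echo 1 > /cgroups/parent/memory.use_hierarchy
 *	# mkdir /cgroups/parent/child
 *
 * Children created afterwards inherit the setting (see mem_cgroup_create()).
 */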
1941
1942 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1943 {
1944         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1945         u64 val = 0;
1946         int type, name;
1947
1948         type = MEMFILE_TYPE(cft->private);
1949         name = MEMFILE_ATTR(cft->private);
1950         switch (type) {
1951         case _MEM:
1952                 val = res_counter_read_u64(&mem->res, name);
1953                 break;
1954         case _MEMSWAP:
1955                 if (do_swap_account)
1956                         val = res_counter_read_u64(&mem->memsw, name);
1957                 break;
1958         default:
1959                 BUG();
1960                 break;
1961         }
1962         return val;
1963 }
1964 /*
1965  * The only user of this function is the
1966  * RES_LIMIT file's write handler.
1967  */
1968 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1969                             const char *buffer)
1970 {
1971         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1972         int type, name;
1973         unsigned long long val;
1974         int ret;
1975
1976         type = MEMFILE_TYPE(cft->private);
1977         name = MEMFILE_ATTR(cft->private);
1978         switch (name) {
1979         case RES_LIMIT:
1980                 /* res_counter_memparse_write_strategy() does all the parsing we need */
1981                 ret = res_counter_memparse_write_strategy(buffer, &val);
1982                 if (ret)
1983                         break;
1984                 if (type == _MEM)
1985                         ret = mem_cgroup_resize_limit(memcg, val);
1986                 else
1987                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1988                 break;
1989         default:
1990                 ret = -EINVAL; /* should be BUG() ? */
1991                 break;
1992         }
1993         return ret;
1994 }
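/*
 * Usage sketch (illustrative; paths are examples only): the limit files
 * accept the usual memparse suffixes via
 * res_counter_memparse_write_strategy(), e.g.
 *
 *	# echo 512M > /cgroups/my_group/memory.limit_in_bytes
 *	# echo 1G > /cgroups/my_group/memory.memsw.limit_in_bytes
 */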
1995
1996 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
1997                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
1998 {
1999         struct cgroup *cgroup;
2000         unsigned long long min_limit, min_memsw_limit, tmp;
2001
2002         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2003         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2004         cgroup = memcg->css.cgroup;
2005         if (!memcg->use_hierarchy)
2006                 goto out;
2007
2008         while (cgroup->parent) {
2009                 cgroup = cgroup->parent;
2010                 memcg = mem_cgroup_from_cont(cgroup);
2011                 if (!memcg->use_hierarchy)
2012                         break;
2013                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
2014                 min_limit = min(min_limit, tmp);
2015                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2016                 min_memsw_limit = min(min_memsw_limit, tmp);
2017         }
2018 out:
2019         *mem_limit = min_limit;
2020         *memsw_limit = min_memsw_limit;
2021         return;
2022 }
2023
2024 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2025 {
2026         struct mem_cgroup *mem;
2027         int type, name;
2028
2029         mem = mem_cgroup_from_cont(cont);
2030         type = MEMFILE_TYPE(event);
2031         name = MEMFILE_ATTR(event);
2032         switch (name) {
2033         case RES_MAX_USAGE:
2034                 if (type == _MEM)
2035                         res_counter_reset_max(&mem->res);
2036                 else
2037                         res_counter_reset_max(&mem->memsw);
2038                 break;
2039         case RES_FAILCNT:
2040                 if (type == _MEM)
2041                         res_counter_reset_failcnt(&mem->res);
2042                 else
2043                         res_counter_reset_failcnt(&mem->memsw);
2044                 break;
2045         }
2046         return 0;
2047 }
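/*
 * Usage sketch (illustrative; paths are examples only): these are trigger
 * files, so the conventional "echo 0" simply fires the reset, e.g.
 *
 *	# echo 0 > /cgroups/my_group/memory.max_usage_in_bytes
 *	# echo 0 > /cgroups/my_group/memory.failcnt
 */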
2048
2049
2050 /* For read statistics */
2051 enum {
2052         MCS_CACHE,
2053         MCS_RSS,
2054         MCS_PGPGIN,
2055         MCS_PGPGOUT,
2056         MCS_INACTIVE_ANON,
2057         MCS_ACTIVE_ANON,
2058         MCS_INACTIVE_FILE,
2059         MCS_ACTIVE_FILE,
2060         MCS_UNEVICTABLE,
2061         NR_MCS_STAT,
2062 };
2063
2064 struct mcs_total_stat {
2065         s64 stat[NR_MCS_STAT];
2066 };
2067
2068 struct {
2069         char *local_name;
2070         char *total_name;
2071 } memcg_stat_strings[NR_MCS_STAT] = {
2072         {"cache", "total_cache"},
2073         {"rss", "total_rss"},
2074         {"pgpgin", "total_pgpgin"},
2075         {"pgpgout", "total_pgpgout"},
2076         {"inactive_anon", "total_inactive_anon"},
2077         {"active_anon", "total_active_anon"},
2078         {"inactive_file", "total_inactive_file"},
2079         {"active_file", "total_active_file"},
2080         {"unevictable", "total_unevictable"}
2081 };
2082
2083
2084 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2085 {
2086         struct mcs_total_stat *s = data;
2087         s64 val;
2088
2089         /* per cpu stat */
2090         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2091         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2092         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2093         s->stat[MCS_RSS] += val * PAGE_SIZE;
2094         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2095         s->stat[MCS_PGPGIN] += val;
2096         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2097         s->stat[MCS_PGPGOUT] += val;
2098
2099         /* per zone stat */
2100         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2101         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2102         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2103         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2104         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2105         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2106         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2107         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2108         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2109         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2110         return 0;
2111 }
2112
2113 static void
2114 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2115 {
2116         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2117 }
2118
2119 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2120                                  struct cgroup_map_cb *cb)
2121 {
2122         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2123         struct mcs_total_stat mystat;
2124         int i;
2125
2126         memset(&mystat, 0, sizeof(mystat));
2127         mem_cgroup_get_local_stat(mem_cont, &mystat);
2128
2129         for (i = 0; i < NR_MCS_STAT; i++)
2130                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2131
2132         /* Hierarchical information */
2133         {
2134                 unsigned long long limit, memsw_limit;
2135                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2136                 cb->fill(cb, "hierarchical_memory_limit", limit);
2137                 if (do_swap_account)
2138                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2139         }
2140
2141         memset(&mystat, 0, sizeof(mystat));
2142         mem_cgroup_get_total_stat(mem_cont, &mystat);
2143         for (i = 0; i < NR_MCS_STAT; i++)
2144                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2145
2146
2147 #ifdef CONFIG_DEBUG_VM
2148         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2149
2150         {
2151                 int nid, zid;
2152                 struct mem_cgroup_per_zone *mz;
2153                 unsigned long recent_rotated[2] = {0, 0};
2154                 unsigned long recent_scanned[2] = {0, 0};
2155
2156                 for_each_online_node(nid)
2157                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2158                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2159
2160                                 recent_rotated[0] +=
2161                                         mz->reclaim_stat.recent_rotated[0];
2162                                 recent_rotated[1] +=
2163                                         mz->reclaim_stat.recent_rotated[1];
2164                                 recent_scanned[0] +=
2165                                         mz->reclaim_stat.recent_scanned[0];
2166                                 recent_scanned[1] +=
2167                                         mz->reclaim_stat.recent_scanned[1];
2168                         }
2169                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2170                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2171                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2172                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2173         }
2174 #endif
2175
2176         return 0;
2177 }
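/*
 * Illustrative "memory.stat" output produced by the function above (the
 * numbers are made up): the local counters come first, then the hierarchy
 * limits, then the "total_*" lines from mem_cgroup_get_total_stat().
 * cache/rss and the LRU lines are in bytes; pgpgin/pgpgout are event counts.
 *
 *	cache 1048576
 *	rss 2097152
 *	pgpgin 1024
 *	pgpgout 256
 *	...
 *	hierarchical_memory_limit 268435456
 *	total_cache 1048576
 *	total_rss 2097152
 *	...
 */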
2178
2179 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2180 {
2181         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2182
2183         return get_swappiness(memcg);
2184 }
2185
2186 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2187                                        u64 val)
2188 {
2189         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2190         struct mem_cgroup *parent;
2191
2192         if (val > 100)
2193                 return -EINVAL;
2194
2195         if (cgrp->parent == NULL)
2196                 return -EINVAL;
2197
2198         parent = mem_cgroup_from_cont(cgrp->parent);
2199
2200         cgroup_lock();
2201
2202         /* If under hierarchy, only a childless root may set this value */
2203         if ((parent->use_hierarchy) ||
2204             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2205                 cgroup_unlock();
2206                 return -EINVAL;
2207         }
2208
2209         spin_lock(&memcg->reclaim_param_lock);
2210         memcg->swappiness = val;
2211         spin_unlock(&memcg->reclaim_param_lock);
2212
2213         cgroup_unlock();
2214
2215         return 0;
2216 }
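/*
 * Usage sketch (illustrative; path is an example only): per-group
 * swappiness takes the same 0..100 range as the global vm.swappiness
 * knob, e.g.
 *
 *	# echo 10 > /cgroups/my_group/memory.swappiness
 *
 * The write is rejected for the root cgroup and for groups covered by a
 * hierarchy (see the checks above).
 */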
2217
2218
2219 static struct cftype mem_cgroup_files[] = {
2220         {
2221                 .name = "usage_in_bytes",
2222                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2223                 .read_u64 = mem_cgroup_read,
2224         },
2225         {
2226                 .name = "max_usage_in_bytes",
2227                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2228                 .trigger = mem_cgroup_reset,
2229                 .read_u64 = mem_cgroup_read,
2230         },
2231         {
2232                 .name = "limit_in_bytes",
2233                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2234                 .write_string = mem_cgroup_write,
2235                 .read_u64 = mem_cgroup_read,
2236         },
2237         {
2238                 .name = "failcnt",
2239                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2240                 .trigger = mem_cgroup_reset,
2241                 .read_u64 = mem_cgroup_read,
2242         },
2243         {
2244                 .name = "stat",
2245                 .read_map = mem_control_stat_show,
2246         },
2247         {
2248                 .name = "force_empty",
2249                 .trigger = mem_cgroup_force_empty_write,
2250         },
2251         {
2252                 .name = "use_hierarchy",
2253                 .write_u64 = mem_cgroup_hierarchy_write,
2254                 .read_u64 = mem_cgroup_hierarchy_read,
2255         },
2256         {
2257                 .name = "swappiness",
2258                 .read_u64 = mem_cgroup_swappiness_read,
2259                 .write_u64 = mem_cgroup_swappiness_write,
2260         },
2261 };
2262
2263 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2264 static struct cftype memsw_cgroup_files[] = {
2265         {
2266                 .name = "memsw.usage_in_bytes",
2267                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2268                 .read_u64 = mem_cgroup_read,
2269         },
2270         {
2271                 .name = "memsw.max_usage_in_bytes",
2272                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2273                 .trigger = mem_cgroup_reset,
2274                 .read_u64 = mem_cgroup_read,
2275         },
2276         {
2277                 .name = "memsw.limit_in_bytes",
2278                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2279                 .write_string = mem_cgroup_write,
2280                 .read_u64 = mem_cgroup_read,
2281         },
2282         {
2283                 .name = "memsw.failcnt",
2284                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2285                 .trigger = mem_cgroup_reset,
2286                 .read_u64 = mem_cgroup_read,
2287         },
2288 };
2289
2290 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2291 {
2292         if (!do_swap_account)
2293                 return 0;
2294         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2295                                 ARRAY_SIZE(memsw_cgroup_files));
2296 };
2297 #else
2298 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2299 {
2300         return 0;
2301 }
2302 #endif
2303
2304 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2305 {
2306         struct mem_cgroup_per_node *pn;
2307         struct mem_cgroup_per_zone *mz;
2308         enum lru_list l;
2309         int zone, tmp = node;
2310         /*
2311          * This routine is called against possible nodes, but it is a BUG
2312          * to call kmalloc() against an offline node.
2313          *
2314          * TODO: this routine can waste a lot of memory for nodes that will
2315          *       never be onlined. It would be better to use a memory-hotplug
2316          *       callback function.
2317          */
2318         if (!node_state(node, N_NORMAL_MEMORY))
2319                 tmp = -1;
2320         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2321         if (!pn)
2322                 return 1;
2323
2324         mem->info.nodeinfo[node] = pn;
2325         memset(pn, 0, sizeof(*pn));
2326
2327         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2328                 mz = &pn->zoneinfo[zone];
2329                 for_each_lru(l)
2330                         INIT_LIST_HEAD(&mz->lists[l]);
2331         }
2332         return 0;
2333 }
2334
2335 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2336 {
2337         kfree(mem->info.nodeinfo[node]);
2338 }
2339
2340 static int mem_cgroup_size(void)
2341 {
2342         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2343         return sizeof(struct mem_cgroup) + cpustat_size;
2344 }
2345
2346 static struct mem_cgroup *mem_cgroup_alloc(void)
2347 {
2348         struct mem_cgroup *mem;
2349         int size = mem_cgroup_size();
2350
2351         if (size < PAGE_SIZE)
2352                 mem = kmalloc(size, GFP_KERNEL);
2353         else
2354                 mem = vmalloc(size);
2355
2356         if (mem)
2357                 memset(mem, 0, size);
2358         return mem;
2359 }
2360
2361 /*
2362  * When a mem_cgroup is destroyed, references from swap_cgroup can remain
2363  * (scanning everything at force_empty time is too costly).
2364  *
2365  * Instead of clearing all of those references at force_empty, we remember
2366  * the number of references held by swap_cgroup and free the mem_cgroup
2367  * when that count goes down to 0.
2368  *
2369  * Removal of the cgroup itself succeeds regardless of refs from swap.
2370  */
2371
2372 static void __mem_cgroup_free(struct mem_cgroup *mem)
2373 {
2374         int node;
2375
2376         free_css_id(&mem_cgroup_subsys, &mem->css);
2377
2378         for_each_node_state(node, N_POSSIBLE)
2379                 free_mem_cgroup_per_zone_info(mem, node);
2380
2381         if (mem_cgroup_size() < PAGE_SIZE)
2382                 kfree(mem);
2383         else
2384                 vfree(mem);
2385 }
2386
2387 static void mem_cgroup_get(struct mem_cgroup *mem)
2388 {
2389         atomic_inc(&mem->refcnt);
2390 }
2391
2392 static void mem_cgroup_put(struct mem_cgroup *mem)
2393 {
2394         if (atomic_dec_and_test(&mem->refcnt)) {
2395                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
2396                 __mem_cgroup_free(mem);
2397                 if (parent)
2398                         mem_cgroup_put(parent);
2399         }
2400 }
2401
2402 /*
2403  * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
2404  */
2405 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2406 {
2407         if (!mem->res.parent)
2408                 return NULL;
2409         return mem_cgroup_from_res_counter(mem->res.parent, res);
2410 }
2411
2412 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2413 static void __init enable_swap_cgroup(void)
2414 {
2415         if (!mem_cgroup_disabled() && really_do_swap_account)
2416                 do_swap_account = 1;
2417 }
2418 #else
2419 static void __init enable_swap_cgroup(void)
2420 {
2421 }
2422 #endif
2423
2424 static struct cgroup_subsys_state * __ref
2425 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2426 {
2427         struct mem_cgroup *mem, *parent;
2428         long error = -ENOMEM;
2429         int node;
2430
2431         mem = mem_cgroup_alloc();
2432         if (!mem)
2433                 return ERR_PTR(error);
2434
2435         for_each_node_state(node, N_POSSIBLE)
2436                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2437                         goto free_out;
2438         /* root ? */
2439         if (cont->parent == NULL) {
2440                 enable_swap_cgroup();
2441                 parent = NULL;
2442         } else {
2443                 parent = mem_cgroup_from_cont(cont->parent);
2444                 mem->use_hierarchy = parent->use_hierarchy;
2445         }
2446
2447         if (parent && parent->use_hierarchy) {
2448                 res_counter_init(&mem->res, &parent->res);
2449                 res_counter_init(&mem->memsw, &parent->memsw);
2450                 /*
2451                  * We increment the parent's refcnt to ensure that we can
2452                  * safely access it on res_counter_charge/uncharge.
2453                  * This refcnt will be decremented when this mem_cgroup
2454                  * is freed (see mem_cgroup_put()).
2455                  */
2456                 mem_cgroup_get(parent);
2457         } else {
2458                 res_counter_init(&mem->res, NULL);
2459                 res_counter_init(&mem->memsw, NULL);
2460         }
2461         mem->last_scanned_child = 0;
2462         spin_lock_init(&mem->reclaim_param_lock);
2463
2464         if (parent)
2465                 mem->swappiness = get_swappiness(parent);
2466         atomic_set(&mem->refcnt, 1);
2467         return &mem->css;
2468 free_out:
2469         __mem_cgroup_free(mem);
2470         return ERR_PTR(error);
2471 }
2472
2473 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2474                                         struct cgroup *cont)
2475 {
2476         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2477
2478         return mem_cgroup_force_empty(mem, false);
2479 }
2480
2481 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2482                                 struct cgroup *cont)
2483 {
2484         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2485
2486         mem_cgroup_put(mem);
2487 }
2488
2489 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2490                                 struct cgroup *cont)
2491 {
2492         int ret;
2493
2494         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2495                                 ARRAY_SIZE(mem_cgroup_files));
2496
2497         if (!ret)
2498                 ret = register_memsw_files(cont, ss);
2499         return ret;
2500 }
2501
2502 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2503                                 struct cgroup *cont,
2504                                 struct cgroup *old_cont,
2505                                 struct task_struct *p)
2506 {
2507         mutex_lock(&memcg_tasklist);
2508         /*
2509          * FIXME: It would be better to move this process's charges from the
2510          * old memcg to the new one, but that is still on the TODO list.
2511          */
2512         mutex_unlock(&memcg_tasklist);
2513 }
2514
2515 struct cgroup_subsys mem_cgroup_subsys = {
2516         .name = "memory",
2517         .subsys_id = mem_cgroup_subsys_id,
2518         .create = mem_cgroup_create,
2519         .pre_destroy = mem_cgroup_pre_destroy,
2520         .destroy = mem_cgroup_destroy,
2521         .populate = mem_cgroup_populate,
2522         .attach = mem_cgroup_move_task,
2523         .early_init = 0,
2524         .use_id = 1,
2525 };
2526
2527 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2528
2529 static int __init disable_swap_account(char *s)
2530 {
2531         really_do_swap_account = 0;
2532         return 1;
2533 }
2534 __setup("noswapaccount", disable_swap_account);
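/*
 * Boot-time note: with CONFIG_CGROUP_MEM_RES_CTLR_SWAP built in, swap
 * accounting is enabled by default; booting with "noswapaccount" on the
 * kernel command line clears really_do_swap_account, so enable_swap_cgroup()
 * leaves do_swap_account at 0.
 */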
2535 #endif