diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f6a575e77ad970156b2fceee7a022707ce1805f..2fc6d6c482387ed35a735746bf4580d3d38e68da 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -27,6 +27,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -295,6 +296,9 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
        struct mem_cgroup *mem = NULL;
+
+       if (!mm)
+               return NULL;
        /*
         * Because we have no locks, mm->owner's may be being moved to other
         * cgroup. We use css_tryget() here even if this looks
@@ -486,30 +490,23 @@ void mem_cgroup_move_lists(struct page *page,
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 {
        int ret;
+       struct mem_cgroup *curr = NULL;
 
        task_lock(task);
-       ret = task->mm && mm_match_cgroup(task->mm, mem);
+       rcu_read_lock();
+       curr = try_get_mem_cgroup_from_mm(task->mm);
+       rcu_read_unlock();
        task_unlock(task);
+       if (!curr)
+               return 0;
+       if (curr->use_hierarchy)
+               ret = css_is_ancestor(&curr->css, &mem->css);
+       else
+               ret = (curr == mem);
+       css_put(&curr->css);
        return ret;
 }
 
-/*
- * Calculate mapped_ratio under memory controller. This will be used in
- * vmscan.c for deteremining we have to reclaim mapped pages.
- */
-int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
-       long total, rss;
-
-       /*
-        * usage is recorded in bytes. But, here, we assume the number of
-        * physical pages can be represented by "long" on any arch.
-        */
-       total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
-       rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
-       return (int)((rss * 100L) / total);
-}
-
 /*
  * prev_priority control...this will be used in memory reclaim path.
  */
@@ -708,6 +705,74 @@ static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
        (*val)++;
        return 0;
 }
+
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled
+ */
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+{
+       struct cgroup *task_cgrp;
+       struct cgroup *mem_cgrp;
+       /*
+        * Need a buffer in BSS, can't rely on allocations. The code relies
+        * on the assumption that OOM is serialized for the memory controller.
+        * If this assumption is broken, revisit this code.
+        */
+       static char memcg_name[PATH_MAX];
+       int ret;
+
+       if (!memcg)
+               return;
+
+
+       rcu_read_lock();
+
+       mem_cgrp = memcg->css.cgroup;
+       task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+       ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+       if (ret < 0) {
+               /*
+                * Unfortunately, we are unable to convert to a useful name,
+                * but we'll still print out the usage information.
+                */
+               rcu_read_unlock();
+               goto done;
+       }
+       rcu_read_unlock();
+
+       printk(KERN_INFO "Task in %s killed", memcg_name);
+
+       rcu_read_lock();
+       ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+       if (ret < 0) {
+               rcu_read_unlock();
+               goto done;
+       }
+       rcu_read_unlock();
+
+       /*
+        * Continues from above, so we don't need a KERN_ level.
+        */
+       printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
+done:
+
+       printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+               res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+               res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+               res_counter_read_u64(&memcg->res, RES_FAILCNT));
+       printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+               "failcnt %llu\n",
+               res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+               res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+               res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
+}
+
 /*
  * This function returns the number of memcg under hierarchy tree. Returns
  * 1(self count) if no children.
@@ -820,6 +885,19 @@ bool mem_cgroup_oom_called(struct task_struct *task)
        rcu_read_unlock();
        return ret;
 }
+
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+       mem->last_oom_jiffies = jiffies;
+       return 0;
+}
+
+static void record_last_oom(struct mem_cgroup *mem)
+{
+       mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+
+
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
  * oom-killer can be invoked.
@@ -902,7 +980,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                                mutex_lock(&memcg_tasklist);
                                mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
                                mutex_unlock(&memcg_tasklist);
-                               mem_over_limit->last_oom_jiffies = jiffies;
+                               record_last_oom(mem_over_limit);
                        }
                        goto nomem;
                }
@@ -913,20 +991,55 @@ nomem:
        return -ENOMEM;
 }
 
+
+/*
+ * A helper function to get a mem_cgroup from its ID. Must be called under
+ * rcu_read_lock(). The caller must check css_is_removed() or similar if
+ * that is a concern. (Dropping a refcnt from swap can be called against a
+ * removed memcg.)
+ */
+static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
+{
+       struct cgroup_subsys_state *css;
+
+       /* ID 0 is unused ID */
+       if (!id)
+               return NULL;
+       css = css_lookup(&mem_cgroup_subsys, id);
+       if (!css)
+               return NULL;
+       return container_of(css, struct mem_cgroup, css);
+}
+
 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
 {
        struct mem_cgroup *mem;
+       struct page_cgroup *pc;
+       unsigned short id;
        swp_entry_t ent;
 
+       VM_BUG_ON(!PageLocked(page));
+
        if (!PageSwapCache(page))
                return NULL;
 
-       ent.val = page_private(page);
-       mem = lookup_swap_cgroup(ent);
-       if (!mem)
-               return NULL;
-       if (!css_tryget(&mem->css))
-               return NULL;
+       pc = lookup_page_cgroup(page);
+       /*
+        * The used bit of a swapcache page is stable under the page lock.
+        */
+       if (PageCgroupUsed(pc)) {
+               mem = pc->mem_cgroup;
+               if (mem && !css_tryget(&mem->css))
+                       mem = NULL;
+       } else {
+               ent.val = page_private(page);
+               id = lookup_swap_cgroup(ent);
+               rcu_read_lock();
+               mem = mem_cgroup_lookup(id);
+               if (mem && !css_tryget(&mem->css))
+                       mem = NULL;
+               rcu_read_unlock();
+       }
        return mem;
 }
 
@@ -1125,6 +1238,10 @@ int mem_cgroup_newpage_charge(struct page *page,
                                MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+                                       enum charge_type ctype);
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
@@ -1161,16 +1278,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                unlock_page_cgroup(pc);
        }
 
-       if (do_swap_account && PageSwapCache(page)) {
-               mem = try_get_mem_cgroup_from_swapcache(page);
-               if (mem)
-                       mm = NULL;
-                 else
-                       mem = NULL;
-               /* SwapCache may be still linked to LRU now. */
-               mem_cgroup_lru_del_before_commit_swapcache(page);
-       }
-
        if (unlikely(!mm && !mem))
                mm = &init_mm;
 
@@ -1178,22 +1285,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 
-       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-       if (mem)
-               css_put(&mem->css);
-       if (PageSwapCache(page))
-               mem_cgroup_lru_add_after_commit_swapcache(page);
+       /* shmem */
+       if (PageSwapCache(page)) {
+               ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+               if (!ret)
+                       __mem_cgroup_commit_charge_swapin(page, mem,
+                                       MEM_CGROUP_CHARGE_TYPE_SHMEM);
+       } else
+               ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+                                       MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
 
-       if (do_swap_account && !ret && PageSwapCache(page)) {
-               swp_entry_t ent = {.val = page_private(page)};
-               /* avoid double counting */
-               mem = swap_cgroup_record(ent, NULL);
-               if (mem) {
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-                       mem_cgroup_put(mem);
-               }
-       }
        return ret;
 }
 
@@ -1236,7 +1337,9 @@ charge_cur_mm:
        return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+                                       enum charge_type ctype)
 {
        struct page_cgroup *pc;
 
@@ -1246,7 +1349,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
                return;
        pc = lookup_page_cgroup(page);
        mem_cgroup_lru_del_before_commit_swapcache(page);
-       __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+       __mem_cgroup_commit_charge(ptr, pc, ctype);
        mem_cgroup_lru_add_after_commit_swapcache(page);
        /*
         * Now swap is on-memory. This means this page may be
@@ -1257,18 +1360,32 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
         */
        if (do_swap_account && PageSwapCache(page)) {
                swp_entry_t ent = {.val = page_private(page)};
+               unsigned short id;
                struct mem_cgroup *memcg;
-               memcg = swap_cgroup_record(ent, NULL);
+
+               id = swap_cgroup_record(ent, 0);
+               rcu_read_lock();
+               memcg = mem_cgroup_lookup(id);
                if (memcg) {
+                       /*
+                        * This recorded memcg may be an obsolete one, so avoid
+                        * calling css_tryget().
+                        */
                        res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                        mem_cgroup_put(memcg);
                }
-
+               rcu_read_unlock();
        }
        /* add this page(page_cgroup) to the LRU we want. */
 
 }
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+       __mem_cgroup_commit_charge_swapin(page, ptr,
+                                       MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
        if (mem_cgroup_disabled())
@@ -1384,7 +1501,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
                                        MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
        /* record memcg information */
        if (do_swap_account && memcg) {
-               swap_cgroup_record(ent, memcg);
+               swap_cgroup_record(ent, css_id(&memcg->css));
                mem_cgroup_get(memcg);
        }
        if (memcg)
@@ -1399,15 +1516,23 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 void mem_cgroup_uncharge_swap(swp_entry_t ent)
 {
        struct mem_cgroup *memcg;
+       unsigned short id;
 
        if (!do_swap_account)
                return;
 
-       memcg = swap_cgroup_record(ent, NULL);
+       id = swap_cgroup_record(ent, 0);
+       rcu_read_lock();
+       memcg = mem_cgroup_lookup(id);
        if (memcg) {
+               /*
+                * We uncharge this because the swap entry is freed.
+                * This memcg may be an obsolete one, so we avoid calling css_tryget().
+                */
                res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                mem_cgroup_put(memcg);
        }
+       rcu_read_unlock();
 }
 #endif