ftrace: limit trace entries
author Steven Rostedt <rostedt@goodmis.org>
Mon, 12 May 2008 19:21:04 +0000 (21:21 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 23 May 2008 20:05:14 +0000 (22:05 +0200)
Currently there is nothing to prevent the root user from using up all of
the system's memory for trace buffers. If the root user allocates too many
entries, the OOM killer might start killing off all tasks.

This patch adds an algorithm to check the following condition:

 pages_requested > (freeable_memory + current_trace_buffer_pages) / 4

If the above condition is met, the allocation fails. This prevents trace
buffers from consuming more than 1/4th of freeable memory.
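
As a rough illustration of the check, here is a minimal userspace sketch
(not the kernel code itself; ENTRIES_PER_PAGE, the buffer count and the
example numbers below are hypothetical, and a 4 KB page size is assumed):

    #include <stdio.h>

    #define ENTRIES_PER_PAGE 58     /* hypothetical; depends on entry size */

    static int within_limit(long requested_entries, int nr_buffers,
                            unsigned long freeable_pages,
                            unsigned long allocated_pages)
    {
            long pages_requested =
                    (requested_entries + (ENTRIES_PER_PAGE - 1)) / ENTRIES_PER_PAGE;

            /* the factor 2 accounts for max_tr mirroring each buffer */
            pages_requested *= nr_buffers * 2;

            /* a negative value means the multiplication overflowed */
            if (pages_requested < 0)
                    return 0;

            /* refuse more than 1/4 of freeable + already allocated pages */
            return pages_requested <= (long)((freeable_pages + allocated_pages) / 4);
    }

    int main(void)
    {
            /* 1 GB freeable => 262144 pages of 4 KB; nothing allocated yet */
            printf("%d\n", within_limit(65536, 4, 262144, 0));
            return 0;
    }

With the numbers above the request needs 9040 pages against a limit of
65536 pages, so it is allowed; a much larger request would push
pages_requested past the limit and fail with -ENOMEM in the real code.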

To determine the freeable_memory term, I made determine_dirtyable_memory()
in mm/page-writeback.c global.

Special thanks go to Peter Zijlstra for suggesting the above calculation.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/writeback.h
kernel/trace/trace.c
mm/page-writeback.c

diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f462439cc2886c56f7c1940f4100ba726d4068c4..bd91987c065fcd1f923a5cd05d6f781c3f685aab 100644
@@ -105,6 +105,8 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
+extern unsigned long determine_dirtyable_memory(void);
+
 extern int dirty_ratio_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82ced406aacf810b9fd45818ef943b35d3b003b8..2824cf48cdca581924883544396e9a2f4fe0d569 100644
@@ -27,6 +27,7 @@
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
 
@@ -51,6 +52,8 @@ static int trace_free_page(void);
 
 static int tracing_disabled = 1;
 
+static unsigned long tracing_pages_allocated;
+
 long
 ns2usecs(cycle_t nsec)
 {
@@ -2591,12 +2594,41 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        }
 
        if (val > global_trace.entries) {
+               long pages_requested;
+               unsigned long freeable_pages;
+
+               /* make sure we have enough memory before mapping */
+               pages_requested =
+                       (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
+
+               /* account for each buffer (and max_tr) */
+               pages_requested *= tracing_nr_buffers * 2;
+
+               /* Check for overflow */
+               if (pages_requested < 0) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
+               freeable_pages = determine_dirtyable_memory();
+
+               /* we only allow requests of up to 1/4 of usable memory */
+               if (pages_requested >
+                   ((freeable_pages + tracing_pages_allocated) / 4)) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
                while (global_trace.entries < val) {
                        if (trace_alloc_page()) {
                                cnt = -ENOMEM;
                                goto out;
                        }
+                       /* double check that we don't go over the known pages */
+                       if (tracing_pages_allocated > pages_requested)
+                               break;
                }
+
        } else {
                /* include the number of entries in val (inc of page entries) */
                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
@@ -2776,6 +2808,7 @@ static int trace_alloc_page(void)
        struct page *page, *tmp;
        LIST_HEAD(pages);
        void *array;
+       unsigned pages_allocated = 0;
        int i;
 
        /* first allocate a page for each CPU */
@@ -2787,6 +2820,7 @@ static int trace_alloc_page(void)
                        goto free_pages;
                }
 
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 
@@ -2798,6 +2832,7 @@ static int trace_alloc_page(void)
                               "for trace buffer!\n");
                        goto free_pages;
                }
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 #endif
@@ -2819,6 +2854,7 @@ static int trace_alloc_page(void)
                SetPageLRU(page);
 #endif
        }
+       tracing_pages_allocated += pages_allocated;
        global_trace.entries += ENTRIES_PER_PAGE;
 
        return 0;
@@ -2853,6 +2889,8 @@ static int trace_free_page(void)
                page = list_entry(p, struct page, lru);
                ClearPageLRU(page);
                list_del(&page->lru);
+               tracing_pages_allocated--;
+               tracing_pages_allocated--;
                __free_page(page);
 
                tracing_reset(data);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 789b6adbef37f1f38c3887f10ba130595903408a..b38f700825fca31b81a48ecce6d60b7856bfcf84 100644
@@ -126,8 +126,6 @@ static void background_writeout(unsigned long _min_pages);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
-static unsigned long determine_dirtyable_memory(void);
-
 /*
  * couple the period to the dirty_ratio:
  *
@@ -347,7 +345,13 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
        unsigned long x;