Merge branches 'tracing/kmemtrace2' and 'tracing/ftrace' into tracing/urgent
[linux-2.6-omap-h63xx.git] kernel/trace/trace.c
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b789c010512c57c98fadd09443503b87b6b808a5..0418fc338b5c2d2d36cdc2987e7e7d4666126929 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/kprobes.h>
-#include <linux/seq_file.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
@@ -91,10 +90,10 @@ static inline void ftrace_enable_cpu(void)
        preempt_enable();
 }
 
-static cpumask_t __read_mostly         tracing_buffer_mask;
+static cpumask_var_t __read_mostly     tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)      \
-       for_each_cpu_mask(cpu, tracing_buffer_mask)
+       for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
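The hunk above turns tracing_buffer_mask from a fixed cpumask_t into a cpumask_var_t and walks it with for_each_cpu(). A minimal sketch of that allocate/populate/iterate pattern, for reference only: example_mask and example_init() are made-up names and are not part of trace.c.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/printk.h>

static cpumask_var_t example_mask;

static int example_init(void)
{
	int cpu;

	/* With CONFIG_CPUMASK_OFFSTACK=y this allocates the mask from the
	 * heap; otherwise it is a cheap wrapper around an embedded mask. */
	if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(example_mask, cpu_possible_mask);

	for_each_cpu(cpu, example_mask)
		pr_info("cpu %d is in the mask\n", cpu);

	return 0;
}

The matching free_cpumask_var(example_mask) belongs in the teardown path, mirroring what the diff adds to tracing_release_pipe() and the tracer_alloc_buffers() error paths below.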
@@ -1185,7 +1184,7 @@ enum trace_file_type {
        TRACE_FILE_ANNOTATE     = 2,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();
@@ -1264,7 +1263,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
        if (iter->ent)
-               trace_iterator_increment(iter, iter->cpu);
+               trace_iterator_increment(iter);
 
        return iter->ent ? iter : NULL;
 }
@@ -1493,10 +1492,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
        if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
                return;
 
-       if (cpu_isset(iter->cpu, iter->started))
+       if (cpumask_test_cpu(iter->cpu, iter->started))
                return;
 
-       cpu_set(iter->cpu, iter->started);
+       cpumask_set_cpu(iter->cpu, iter->started);
        trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
@@ -1987,13 +1986,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2034,6 +2027,10 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                      size_t count, loff_t *ppos)
 {
        int err, cpu;
+       cpumask_var_t tracing_cpumask_new;
+
+       if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+               return -ENOMEM;
 
        mutex_lock(&tracing_cpumask_update_lock);
        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -2047,26 +2044,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                 * Increase/decrease the disabled counter if we are
                 * about to flip a bit in the cpumask:
                 */
-               if (cpu_isset(cpu, tracing_cpumask) &&
-                               !cpu_isset(cpu, tracing_cpumask_new)) {
+               if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+                               !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
                }
-               if (!cpu_isset(cpu, tracing_cpumask) &&
-                               cpu_isset(cpu, tracing_cpumask_new)) {
+               if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+                               cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
                }
        }
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_enable();
 
-       tracing_cpumask = tracing_cpumask_new;
+       cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
        mutex_unlock(&tracing_cpumask_update_lock);
+       free_cpumask_var(tracing_cpumask_new);
 
        return count;
 
 err_unlock:
        mutex_unlock(&tracing_cpumask_update_lock);
+       free_cpumask_var(tracing_cpumask_new);
 
        return err;
 }
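The write handler above follows a common cpumask_var_t pattern: allocate a temporary mask, parse the user buffer into it, apply it with cpumask_copy(), and free the temporary on every exit path. A condensed sketch of that pattern, for illustration only: example_mask_write() and dst_mask are made-up names, and the locking and disabled-counter bookkeeping from trace.c are omitted.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static ssize_t example_mask_write(const char __user *ubuf, size_t count,
				  struct cpumask *dst_mask)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	/* Parse a hex mask such as "f" or "ff,ffffffff" from user space. */
	err = cpumask_parse_user(ubuf, count, new_mask);
	if (err) {
		free_cpumask_var(new_mask);
		return err;
	}

	cpumask_copy(dst_mask, new_mask);
	free_cpumask_var(new_mask);

	return count;
}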
@@ -2456,10 +2455,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
        if (!iter)
                return -ENOMEM;
 
+       if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+               kfree(iter);
+               return -ENOMEM;
+       }
+
        mutex_lock(&trace_types_lock);
 
        /* trace pipe does not show start of buffer */
-       cpus_setall(iter->started);
+       cpumask_setall(iter->started);
 
        iter->tr = &global_trace;
        iter->trace = current_trace;
@@ -2476,6 +2480,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
        struct trace_iterator *iter = file->private_data;
 
+       free_cpumask_var(iter->started);
        kfree(iter);
        atomic_dec(&tracing_reader);
 
@@ -3094,7 +3099,6 @@ void ftrace_dump(void)
        static DEFINE_SPINLOCK(ftrace_dump_lock);
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
-       static cpumask_t mask;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
@@ -3128,8 +3132,6 @@ void ftrace_dump(void)
         * and then release the locks again.
         */
 
-       cpus_clear(mask);
-
        while (!trace_empty(&iter)) {
 
                if (!cnt)
@@ -3165,19 +3167,28 @@ __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
        int i;
+       int ret = -ENOMEM;
 
-       /* TODO: make the number of buffers hot pluggable with CPUS */
-       tracing_buffer_mask = cpu_possible_map;
+       if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+               goto out;
+
+       if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+               goto out_free_buffer_mask;
+
+       cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+       cpumask_copy(tracing_cpumask, cpu_all_mask);
 
+       /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(trace_buf_size,
                                                   TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
-               return 0;
+               goto out_free_cpumask;
        }
        global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
        max_tr.buffer = ring_buffer_alloc(trace_buf_size,
                                             TRACE_BUFFER_FLAGS);
@@ -3185,7 +3196,7 @@ __init static int tracer_alloc_buffers(void)
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
-               return 0;
+               goto out_free_cpumask;
        }
        max_tr.entries = ring_buffer_size(max_tr.buffer);
        WARN_ON(max_tr.entries != global_trace.entries);
@@ -3215,8 +3226,14 @@ __init static int tracer_alloc_buffers(void)
                                       &trace_panic_notifier);
 
        register_die_notifier(&trace_die_notifier);
+       return 0;
 
-       return 0;
+out_free_cpumask:
+       free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+       free_cpumask_var(tracing_buffer_mask);
+out:
+       return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
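The tracer_alloc_buffers() changes adopt the usual kernel goto-unwind style: each allocation gets a matching label, a failure jumps to the label that frees everything allocated so far, and success returns before reaching the labels so the masks stay live for runtime use. A stripped-down sketch of that shape, for illustration only: mask_a, mask_b, example_buffers_ready() and example_setup() are made-up names.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/init.h>

static cpumask_var_t mask_a, mask_b;

/* Hypothetical stand-in for a later setup step that can fail. */
static int example_buffers_ready(void)
{
	return 1;
}

static int __init example_setup(void)
{
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&mask_a, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&mask_b, GFP_KERNEL))
		goto out_free_a;

	if (!example_buffers_ready())
		goto out_free_b;

	return 0;	/* success: keep both masks allocated for later use */

out_free_b:
	free_cpumask_var(mask_b);
out_free_a:
	free_cpumask_var(mask_a);
out:
	return ret;
}
early_initcall(example_setup);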