Merge branch 'tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/tip...
author Steven Rostedt <srostedt@redhat.com>
Fri, 13 Mar 2009 01:12:46 +0000 (21:12 -0400)
committer Steven Rostedt <srostedt@redhat.com>
Fri, 13 Mar 2009 01:12:46 +0000 (21:12 -0400)
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events.c

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 178858492a89bcafb3f5ca96dbdfd9ea5ba181d4..d07c2888396f8782d37cb00947ec8d3385b82f03 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
+#include <linux/cpu.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -301,6 +302,10 @@ struct ring_buffer {
        struct mutex                    mutex;
 
        struct ring_buffer_per_cpu      **buffers;
+
+#ifdef CONFIG_HOTPLUG
+       struct notifier_block           cpu_notify;
+#endif
 };
 
 struct ring_buffer_iter {
@@ -459,6 +464,11 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  */
 extern int ring_buffer_page_too_big(void);
 
+#ifdef CONFIG_HOTPLUG
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+                                  unsigned long action, void *hcpu);
+#endif
+
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
@@ -496,7 +506,8 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
        if (buffer->pages == 1)
                buffer->pages++;
 
-       cpumask_copy(buffer->cpumask, cpu_possible_mask);
+       get_online_cpus();
+       cpumask_copy(buffer->cpumask, cpu_online_mask);
        buffer->cpus = nr_cpu_ids;
 
        bsize = sizeof(void *) * nr_cpu_ids;
@@ -512,6 +523,13 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
                        goto fail_free_buffers;
        }
 
+#ifdef CONFIG_HOTPLUG
+       buffer->cpu_notify.notifier_call = rb_cpu_notify;
+       buffer->cpu_notify.priority = 0;
+       register_cpu_notifier(&buffer->cpu_notify);
+#endif
+
+       put_online_cpus();
        mutex_init(&buffer->mutex);
 
        return buffer;
@@ -525,6 +543,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 
  fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
+       put_online_cpus();
 
  fail_free_buffer:
        kfree(buffer);
@@ -541,9 +560,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
        int cpu;
 
+       get_online_cpus();
+
+#ifdef CONFIG_HOTPLUG
+       unregister_cpu_notifier(&buffer->cpu_notify);
+#endif
+
        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+       put_online_cpus();
+
        free_cpumask_var(buffer->cpumask);
 
        kfree(buffer);
@@ -649,16 +676,15 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
                return size;
 
        mutex_lock(&buffer->mutex);
+       get_online_cpus();
 
        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
        if (size < buffer_size) {
 
                /* easy case, just free pages */
-               if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
-                       mutex_unlock(&buffer->mutex);
-                       return -1;
-               }
+               if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
+                       goto out_fail;
 
                rm_pages = buffer->pages - nr_pages;
 
@@ -677,10 +703,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         * add these pages to the cpu_buffers. Otherwise we just free
         * them all and return -ENOMEM;
         */
-       if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
-               mutex_unlock(&buffer->mutex);
-               return -1;
-       }
+       if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
+               goto out_fail;
 
        new_pages = nr_pages - buffer->pages;
 
@@ -705,13 +729,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
                rb_insert_pages(cpu_buffer, &pages, new_pages);
        }
 
-       if (RB_WARN_ON(buffer, !list_empty(&pages))) {
-               mutex_unlock(&buffer->mutex);
-               return -1;
-       }
+       if (RB_WARN_ON(buffer, !list_empty(&pages)))
+               goto out_fail;
 
  out:
        buffer->pages = nr_pages;
+       put_online_cpus();
        mutex_unlock(&buffer->mutex);
 
        return size;
@@ -721,8 +744,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
+       put_online_cpus();
        mutex_unlock(&buffer->mutex);
        return -ENOMEM;
+
+       /*
+        * Something went totally wrong, and we are too paranoid
+        * to even clean up the mess.
+        */
+ out_fail:
+       put_online_cpus();
+       mutex_unlock(&buffer->mutex);
+       return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
@@ -1528,11 +1561,15 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
 
+       get_online_cpus();
+
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return;
+               goto out;
 
        cpu_buffer = buffer->buffers[cpu];
        atomic_inc(&cpu_buffer->record_disabled);
+ out:
+       put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
@@ -1548,11 +1585,15 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
 
+       get_online_cpus();
+
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return;
+               goto out;
 
        cpu_buffer = buffer->buffers[cpu];
        atomic_dec(&cpu_buffer->record_disabled);
+ out:
+       put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
@@ -1564,12 +1605,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       unsigned long ret = 0;
+
+       get_online_cpus();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return 0;
+               goto out;
 
        cpu_buffer = buffer->buffers[cpu];
-       return cpu_buffer->entries;
+       ret = cpu_buffer->entries;
+ out:
+       put_online_cpus();
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
@@ -1581,12 +1629,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       unsigned long ret = 0;
+
+       get_online_cpus();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return 0;
+               goto out;
 
        cpu_buffer = buffer->buffers[cpu];
-       return cpu_buffer->overrun;
+       ret = cpu_buffer->overrun;
+ out:
+       put_online_cpus();
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
@@ -1603,12 +1658,16 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
        unsigned long entries = 0;
        int cpu;
 
+       get_online_cpus();
+
        /* if you care about this being correct, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                entries += cpu_buffer->entries;
        }
 
+       put_online_cpus();
+
        return entries;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries);
@@ -1626,12 +1685,16 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
        unsigned long overruns = 0;
        int cpu;
 
+       get_online_cpus();
+
        /* if you care about this being correct, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                overruns += cpu_buffer->overrun;
        }
 
+       put_online_cpus();
+
        return overruns;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
@@ -1663,9 +1726,14 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
  */
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+       struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
 
+       if (!iter)
+               return;
+
+       cpu_buffer = iter->cpu_buffer;
+
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        rb_iter_reset(iter);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -1900,9 +1968,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        struct buffer_page *reader;
        int nr_loops = 0;
 
-       if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return NULL;
-
        cpu_buffer = buffer->buffers[cpu];
 
  again:
@@ -2028,13 +2093,21 @@ struct ring_buffer_event *
 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-       struct ring_buffer_event *event;
+       struct ring_buffer_event *event = NULL;
        unsigned long flags;
 
+       get_online_cpus();
+
+       if (!cpumask_test_cpu(cpu, buffer->cpumask))
+               goto out;
+
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_buffer_peek(buffer, cpu, ts);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+       put_online_cpus();
+
        return event;
 }
 
@@ -2071,24 +2144,31 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-       struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-       struct ring_buffer_event *event;
+       struct ring_buffer_per_cpu *cpu_buffer;
+       struct ring_buffer_event *event = NULL;
        unsigned long flags;
 
+       /* might be called in atomic */
+       preempt_disable();
+
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return NULL;
+               goto out;
 
+       cpu_buffer = buffer->buffers[cpu];
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
        event = rb_buffer_peek(buffer, cpu, ts);
        if (!event)
-               goto out;
+               goto out_unlock;
 
        rb_advance_reader(cpu_buffer);
 
- out:
+ out_unlock:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+       preempt_enable();
+
        return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
@@ -2109,15 +2189,17 @@ struct ring_buffer_iter *
 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       struct ring_buffer_iter *iter;
+       struct ring_buffer_iter *iter = NULL;
        unsigned long flags;
 
+       get_online_cpus();
+
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return NULL;
+               goto out;
 
        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
-               return NULL;
+               goto out;
 
        cpu_buffer = buffer->buffers[cpu];
 
@@ -2132,6 +2214,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
        __raw_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+       put_online_cpus();
+
        return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -2224,9 +2309,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        unsigned long flags;
+       int resched;
+
+       /* Can't use get_online_cpus because this can be in atomic */
+       resched = ftrace_preempt_disable();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return;
+               goto out;
 
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
@@ -2237,6 +2326,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
        __raw_spin_unlock(&cpu_buffer->lock);
 
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+       ftrace_preempt_enable(resched);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
@@ -2246,10 +2337,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
  */
 void ring_buffer_reset(struct ring_buffer *buffer)
 {
+       int resched;
        int cpu;
 
+       /* Can't use get_online_cpus because this can be in atomic */
+       resched = ftrace_preempt_disable();
+
        for_each_buffer_cpu(buffer, cpu)
                ring_buffer_reset_cpu(buffer, cpu);
+
+       ftrace_preempt_enable(resched);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
@@ -2262,12 +2359,17 @@ int ring_buffer_empty(struct ring_buffer *buffer)
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;
 
+       get_online_cpus();
+
        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                if (!rb_per_cpu_empty(cpu_buffer))
                        return 0;
        }
+
+       put_online_cpus();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -2280,12 +2382,20 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       int ret = 1;
+
+       get_online_cpus();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               return 1;
+               goto out;
 
        cpu_buffer = buffer->buffers[cpu];
-       return rb_per_cpu_empty(cpu_buffer);
+       ret = rb_per_cpu_empty(cpu_buffer);
+
+ out:
+       put_online_cpus();
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
@@ -2304,32 +2414,37 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 {
        struct ring_buffer_per_cpu *cpu_buffer_a;
        struct ring_buffer_per_cpu *cpu_buffer_b;
+       int ret = -EINVAL;
+
+       get_online_cpus();
 
        if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
            !cpumask_test_cpu(cpu, buffer_b->cpumask))
-               return -EINVAL;
+               goto out;
 
        /* At least make sure the two buffers are somewhat the same */
        if (buffer_a->pages != buffer_b->pages)
-               return -EINVAL;
+               goto out;
+
+       ret = -EAGAIN;
 
        if (ring_buffer_flags != RB_BUFFERS_ON)
-               return -EAGAIN;
+               goto out;
 
        if (atomic_read(&buffer_a->record_disabled))
-               return -EAGAIN;
+               goto out;
 
        if (atomic_read(&buffer_b->record_disabled))
-               return -EAGAIN;
+               goto out;
 
        cpu_buffer_a = buffer_a->buffers[cpu];
        cpu_buffer_b = buffer_b->buffers[cpu];
 
        if (atomic_read(&cpu_buffer_a->record_disabled))
-               return -EAGAIN;
+               goto out;
 
        if (atomic_read(&cpu_buffer_b->record_disabled))
-               return -EAGAIN;
+               goto out;
 
        /*
         * We can't do a synchronize_sched here because this
@@ -2349,7 +2464,11 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
        atomic_dec(&cpu_buffer_a->record_disabled);
        atomic_dec(&cpu_buffer_b->record_disabled);
 
-       return 0;
+       ret = 0;
+out:
+       put_online_cpus();
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
@@ -2464,27 +2583,32 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
        u64 save_timestamp;
        int ret = -1;
 
+       get_online_cpus();
+
+       if (!cpumask_test_cpu(cpu, buffer->cpumask))
+               goto out;
+
        /*
         * If len is not big enough to hold the page header, then
         * we can not copy anything.
         */
        if (len <= BUF_PAGE_HDR_SIZE)
-               return -1;
+               goto out;
 
        len -= BUF_PAGE_HDR_SIZE;
 
        if (!data_page)
-               return -1;
+               goto out;
 
        bpage = *data_page;
        if (!bpage)
-               return -1;
+               goto out;
 
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
-               goto out;
+               goto out_unlock;
 
        event = rb_reader_event(cpu_buffer);
 
@@ -2506,7 +2630,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                unsigned int size;
 
                if (full)
-                       goto out;
+                       goto out_unlock;
 
                if (len > (commit - read))
                        len = (commit - read);
@@ -2514,7 +2638,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                size = rb_event_length(event);
 
                if (len < size)
-                       goto out;
+                       goto out_unlock;
 
                /* save the current timestamp, since the user will need it */
                save_timestamp = cpu_buffer->read_stamp;
@@ -2553,9 +2677,12 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
        }
        ret = read;
 
- out:
+ out_unlock:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+       put_online_cpus();
+
        return ret;
 }
 
@@ -2629,3 +2756,42 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+
+#ifdef CONFIG_HOTPLUG
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+                                  unsigned long action, void *hcpu)
+{
+       struct ring_buffer *buffer =
+               container_of(self, struct ring_buffer, cpu_notify);
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               if (cpu_isset(cpu, *buffer->cpumask))
+                       return NOTIFY_OK;
+
+               buffer->buffers[cpu] =
+                       rb_allocate_cpu_buffer(buffer, cpu);
+               if (!buffer->buffers[cpu]) {
+                       WARN(1, "failed to allocate ring buffer on CPU %ld\n",
+                            cpu);
+                       return NOTIFY_OK;
+               }
+               smp_wmb();
+               cpu_set(cpu, *buffer->cpumask);
+               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               /*
+                * Do nothing.
+                *  If we were to free the buffer, then the user would
+                *  lose any trace that was in the buffer.
+                */
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+#endif
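
The ring_buffer.c changes above revolve around two ideas: accessors that walk per-CPU buffers are bracketed by get_online_cpus()/put_online_cpus() so CPUs cannot come or go underneath them, and the buffer now registers a CPU-hotplug notifier so a newly onlined CPU gets a per-CPU buffer allocated on demand. The fragment below is a minimal sketch of that pattern, not the ring-buffer code itself; the my_percpu_owner structure and helper names are invented for illustration.

/*
 * Minimal sketch of the hotplug pattern used above.  Illustrative names,
 * trimmed error handling; not the actual ring-buffer implementation.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/slab.h>

struct my_percpu_owner {
	void			**per_cpu_data;	/* one slot per possible CPU */
	struct notifier_block	cpu_notify;
};

static int my_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	/* recover the owning structure from the embedded notifier_block */
	struct my_percpu_owner *owner =
		container_of(self, struct my_percpu_owner, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* give the incoming CPU its own buffer before it goes online */
		if (!owner->per_cpu_data[cpu])
			owner->per_cpu_data[cpu] = kzalloc(128, GFP_KERNEL);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int my_owner_setup(struct my_percpu_owner *owner)
{
	int cpu;

	owner->per_cpu_data = kcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL);
	if (!owner->per_cpu_data)
		return -ENOMEM;

	get_online_cpus();	/* hold the hotplug lock: online mask stays stable */
	for_each_online_cpu(cpu)
		owner->per_cpu_data[cpu] = kzalloc(128, GFP_KERNEL);

	owner->cpu_notify.notifier_call = my_cpu_notify;
	owner->cpu_notify.priority = 0;
	register_cpu_notifier(&owner->cpu_notify);
	put_online_cpus();

	return 0;
}

Two details from the patch are worth noting alongside the sketch: on CPU_DOWN_PREPARE the notifier deliberately keeps the per-CPU buffer so no trace data is lost, and paths that may run in atomic context (ring_buffer_consume(), ring_buffer_reset_cpu(), ring_buffer_reset()) disable preemption instead of calling get_online_cpus(), which can sleep.
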
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c9c6d9070547724ea979604648f89ed4c424244..c3946a6df34e8be4081e04ee58130cfc2ba45e6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
 unsigned long __read_mostly    tracing_max_latency;
 unsigned long __read_mostly    tracing_thresh;
 
+/*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
 /*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
@@ -128,6 +134,8 @@ static int __init set_ftrace(char *str)
 {
        strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
+       /* We are using ftrace early, expand it */
+       ring_buffer_expanded = 1;
        return 1;
 }
 __setup("ftrace=", set_ftrace);
@@ -1658,6 +1666,19 @@ static int trace_empty(struct trace_iterator *iter)
 {
        int cpu;
 
+       /* If we are looking at one CPU buffer, only check that one */
+       if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+               cpu = iter->cpu_file;
+               if (iter->buffer_iter[cpu]) {
+                       if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+                               return 0;
+               } else {
+                       if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+                               return 0;
+               }
+               return 1;
+       }
+
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu]) {
                        if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
@@ -1784,17 +1805,11 @@ __tracing_open(struct inode *inode, struct file *file)
 
                        iter->buffer_iter[cpu] =
                                ring_buffer_read_start(iter->tr->buffer, cpu);
-
-                       if (!iter->buffer_iter[cpu])
-                               goto fail_buffer;
                }
        } else {
                cpu = iter->cpu_file;
                iter->buffer_iter[cpu] =
                                ring_buffer_read_start(iter->tr->buffer, cpu);
-
-               if (!iter->buffer_iter[cpu])
-                       goto fail;
        }
 
        /* TODO stop tracer */
@@ -2315,6 +2330,60 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
        return t->init(tr);
 }
 
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+       int ret;
+
+       /*
+        * If kernel or user changes the size of the ring buffer
+        * the expansion is considered completed.
+        */
+       ring_buffer_expanded = 1;
+
+       ret = ring_buffer_resize(global_trace.buffer, size);
+       if (ret < 0)
+               return ret;
+
+       ret = ring_buffer_resize(max_tr.buffer, size);
+       if (ret < 0) {
+               int r;
+
+               r = ring_buffer_resize(global_trace.buffer,
+                                      global_trace.entries);
+               if (r < 0) {
+                       /* AARGH! We are left with different
+                        * size max buffer!!!! */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+               }
+               return ret;
+       }
+
+       global_trace.entries = size;
+
+       return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save on memory when the tracing is never used on a system with it
+ * configured in. The ring buffers are set to a minimum size. But once
+ * a user starts to use the tracing facility, then they need to grow
+ * to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+       int ret = 0;
+
+       if (!ring_buffer_expanded)
+               ret = tracing_resize_ring_buffer(trace_buf_size);
+
+       return ret;
+}
+
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
@@ -2330,6 +2399,13 @@ static int tracing_set_tracer(const char *buf)
        struct tracer *t;
        int ret = 0;
 
+       if (!ring_buffer_expanded) {
+               ret = tracing_resize_ring_buffer(trace_buf_size);
+               if (ret < 0)
+                       return ret;
+               ret = 0;
+       }
+
        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
@@ -2903,28 +2979,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        val <<= 10;
 
        if (val != global_trace.entries) {
-               ret = ring_buffer_resize(global_trace.buffer, val);
-               if (ret < 0) {
-                       cnt = ret;
-                       goto out;
-               }
-
-               ret = ring_buffer_resize(max_tr.buffer, val);
+               ret = tracing_resize_ring_buffer(val);
                if (ret < 0) {
-                       int r;
                        cnt = ret;
-                       r = ring_buffer_resize(global_trace.buffer,
-                                              global_trace.entries);
-                       if (r < 0) {
-                               /* AARGH! We are left with different
-                                * size max buffer!!!! */
-                               WARN_ON(1);
-                               tracing_disabled = 1;
-                       }
                        goto out;
                }
-
-               global_trace.entries = val;
        }
 
        filp->f_pos += cnt;
@@ -3916,6 +3975,7 @@ void ftrace_dump(void)
 __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
+       int ring_buf_size;
        int i;
        int ret = -ENOMEM;
 
@@ -3928,12 +3988,18 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                goto out_free_tracing_cpumask;
 
+       /* To save memory, keep the ring buffer size to its minimum */
+       if (ring_buffer_expanded)
+               ring_buf_size = trace_buf_size;
+       else
+               ring_buf_size = 1;
+
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
        cpumask_clear(tracing_reader_cpumask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
-       global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+       global_trace.buffer = ring_buffer_alloc(ring_buf_size,
                                                   TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3944,7 +4010,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+       max_tr.buffer = ring_buffer_alloc(ring_buf_size,
                                             TRACE_BUFFER_FLAGS);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
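
The trace.c hunks introduce lazy sizing of the trace buffers: the ring buffer is asked for a minimal size at boot and only expanded to trace_buf_size the first time tracing is actually used (or right away when ftrace= is given on the command line). Below is a grow-on-first-use sketch of the same idea in plain C; the names and the 1 MiB target size are invented for the example.

/*
 * Grow-on-first-use sketch of the scheme added in trace.c above.
 * Standalone user-space C; names and sizes are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_BUF_SIZE (1 << 20)	/* the "real" size once tracing is used */

static size_t buf_size = 1;		/* boot-time minimum */
static int    buf_expanded;		/* has the buffer been grown yet? */
static char  *buf;

/* Expand the buffer to its default size the first time it is needed. */
static int update_buffers(void)
{
	char *tmp;

	if (buf_expanded)
		return 0;

	tmp = realloc(buf, DEFAULT_BUF_SIZE);
	if (!tmp)
		return -1;		/* keep the small buffer on failure */

	buf = tmp;
	buf_size = DEFAULT_BUF_SIZE;
	buf_expanded = 1;
	return 0;
}

/* Every entry point that turns tracing on calls update_buffers() first. */
static int enable_tracing(void)
{
	if (update_buffers() < 0)
		return -1;
	printf("tracing enabled, buffer is %zu bytes\n", buf_size);
	return 0;
}

int main(void)
{
	buf = malloc(buf_size);		/* minimal allocation at "boot" */
	if (!buf)
		return 1;
	return enable_tracing() ? 1 : 0;
}

The kernel version, tracing_resize_ring_buffer(), has the extra complication that global_trace and max_tr must end up the same size: if resizing the second buffer fails it tries to shrink the first one back, and if even that fails it gives up and sets tracing_disabled.
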
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c5e1d8865fe4d2d4036dd886f447cbf737b45de7..336324d717f88d633466062b35f55af8883e69fd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -737,6 +737,9 @@ static inline void trace_branch_disable(void)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
+/* set ring buffers to default size if not already done so */
+int tracing_update_buffers(void);
+
 /* trace event type bit fields, not numeric */
 enum {
        TRACE_EVENT_TYPE_PRINTF         = 1,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 769dfd00fc859a1b4ba78b7e30696c651d5b904e..ca624df735916d3264d6d2a7f3f273f8972d265c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -141,6 +141,10 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
        if (!cnt || cnt < 0)
                return 0;
 
+       ret = tracing_update_buffers();
+       if (ret < 0)
+               return ret;
+
        ret = get_user(ch, ubuf++);
        if (ret)
                return ret;
@@ -331,6 +335,10 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
        if (ret < 0)
                return ret;
 
+       ret = tracing_update_buffers();
+       if (ret < 0)
+               return ret;
+
        switch (val) {
        case 0:
        case 1:
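
The trace_events.c hunks apply the same rule at the user-facing end: a write handler that may switch events on first calls tracing_update_buffers() and fails the write if the expansion fails, so no event can fire into a still-minimal buffer. A stripped-down handler following that shape might look like the sketch below; the handler name is invented, and it is assumed to live in kernel/trace/ next to trace.h, which declares tracing_update_buffers().

/*
 * Sketch of the guard added to the event write handlers above.
 * Not the actual ftrace_event_write()/event_enable_write() code.
 */
#include <linux/fs.h>
#include <linux/uaccess.h>

#include "trace.h"

static ssize_t
my_event_write(struct file *filp, const char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	int ret;

	/* grow the ring buffers to full size before any event can fire */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/*
	 * ... parse ubuf and enable or disable the requested event here,
	 * as ftrace_event_write() and event_enable_write() do above ...
	 */

	*ppos += cnt;
	return cnt;
}
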