static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
 static DEFINE_SPINLOCK(trace_cmdline_lock);
-atomic_t trace_record_cmdline_disabled;
+
+/*
+ * Non-zero while at least one tracer wants the previous task's
+ * comm recorded at every context switch (see ftrace_ctx_switch()).
+ * Incremented/decremented by the tracers' start/stop callbacks.
+ */
+atomic_t trace_record_cmdline_enabled __read_mostly;
+
+/* temporarily disable cmdline recording */
+atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
 
 extern unsigned long tracing_max_latency;
 extern unsigned long tracing_thresh;
 
+/* incremented by tracers that want cmdlines recorded on context switches */
+extern atomic_t trace_record_cmdline_enabled;
+
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);
 
 static void start_function_trace(struct trace_array *tr)
 {
        function_reset(tr);
+       /* enable ctx-switch cmdline recording before tracing starts */
+       atomic_inc(&trace_record_cmdline_enabled);
        tracing_start_function_trace();
 }
 
 static void stop_function_trace(struct trace_array *tr)
 {
        tracing_stop_function_trace();
+       /* tracing is stopped; drop our cmdline-recording reference */
+       atomic_dec(&trace_record_cmdline_enabled);
 }
 
 static void function_trace_init(struct trace_array *tr)
 
        if (!tracer_enabled)
                return;
 
-       tracing_record_cmdline(prev);
-
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
 ftrace_ctx_switch(void *__rq, struct task_struct *prev,
                   struct task_struct *next)
 {
+       /* record the outgoing task's comm only when a tracer asked for it */
+       if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
+               tracing_record_cmdline(prev);
+
        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
 static void start_sched_trace(struct trace_array *tr)
 {
        sched_switch_reset(tr);
+       /* have ftrace_ctx_switch() record cmdlines while we are active */
+       atomic_inc(&trace_record_cmdline_enabled);
        tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
+       /*
+        * Drop the cmdline-recording reference before disabling the
+        * tracer. NOTE(review): stop_function_trace() does this in the
+        * opposite order (stop first, then dec); both are benign since
+        * an extra recorded cmdline is harmless, but worth confirming.
+        */
+       atomic_dec(&trace_record_cmdline_enabled);
        tracer_enabled = 0;
 }