/*
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 */
7 #include <linux/module.h>
9 #include <linux/debugfs.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/marker.h>
13 #include <linux/ftrace.h>
17 static struct trace_array *ctx_trace;
18 static int __read_mostly tracer_enabled;
21 ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
23 struct trace_array *tr = ctx_trace;
24 struct trace_array_cpu *data;
32 local_irq_save(flags);
33 cpu = raw_smp_processor_id();
35 disabled = atomic_inc_return(&data->disabled);
37 if (likely(disabled == 1))
38 tracing_sched_switch_trace(tr, data, prev, next, flags);
40 atomic_dec(&data->disabled);
41 local_irq_restore(flags);
45 wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
47 struct trace_array *tr = ctx_trace;
48 struct trace_array_cpu *data;
56 tracing_record_cmdline(curr);
58 local_irq_save(flags);
59 cpu = raw_smp_processor_id();
61 disabled = atomic_inc_return(&data->disabled);
63 if (likely(disabled == 1))
64 tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
66 atomic_dec(&data->disabled);
67 local_irq_restore(flags);
71 ftrace_ctx_switch(void *__rq, struct task_struct *prev,
72 struct task_struct *next)
74 if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
75 tracing_record_cmdline(prev);
78 * If tracer_switch_func only points to the local
79 * switch func, it still needs the ptr passed to it.
81 ctx_switch_func(__rq, prev, next);
84 * Chain to the wakeup tracer (this is a NOP if disabled):
86 wakeup_sched_switch(prev, next);
/*
 * ftrace_wake_up_task - scheduler hook invoked on every task wakeup.
 *
 * Hands the event to the local wakeup probe, then chains to the
 * wakeup-latency tracer.
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
102 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
104 struct trace_array *tr = ctx_trace;
105 struct trace_array_cpu *data;
113 local_irq_save(flags);
114 cpu = raw_smp_processor_id();
115 data = tr->data[cpu];
116 disabled = atomic_inc_return(&data->disabled);
118 if (likely(disabled == 1))
119 __trace_special(tr, data, arg1, arg2, arg3);
121 atomic_dec(&data->disabled);
122 local_irq_restore(flags);
125 static void sched_switch_reset(struct trace_array *tr)
129 tr->time_start = ftrace_now(tr->cpu);
131 for_each_online_cpu(cpu)
132 tracing_reset(tr->data[cpu]);
135 static void start_sched_trace(struct trace_array *tr)
137 sched_switch_reset(tr);
138 atomic_inc(&trace_record_cmdline_enabled);
142 static void stop_sched_trace(struct trace_array *tr)
144 atomic_dec(&trace_record_cmdline_enabled);
148 static void sched_switch_trace_init(struct trace_array *tr)
153 start_sched_trace(tr);
156 static void sched_switch_trace_reset(struct trace_array *tr)
159 stop_sched_trace(tr);
162 static void sched_switch_trace_ctrl_update(struct trace_array *tr)
164 /* When starting a new trace, reset the buffers */
166 start_sched_trace(tr);
168 stop_sched_trace(tr);
171 static struct tracer sched_switch_trace __read_mostly =
173 .name = "sched_switch",
174 .init = sched_switch_trace_init,
175 .reset = sched_switch_trace_reset,
176 .ctrl_update = sched_switch_trace_ctrl_update,
177 #ifdef CONFIG_FTRACE_SELFTEST
178 .selftest = trace_selftest_startup_sched_switch,
182 __init static int init_sched_switch_trace(void)
184 return register_tracer(&sched_switch_trace);
186 device_initcall(init_sched_switch_trace);