kernel/trace/trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

/* trace_array used by this tracer and its switch/wakeup callbacks */
static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;

/*
 * Record a context switch from @prev to @next in the trace buffer.
 * The per-cpu "disabled" counter keeps a nested call on this CPU
 * from recursing into the tracer.
 */
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

/*
 * Record a wakeup event (@wakee woken up by @curr) in the trace buffer,
 * under the same per-cpu recursion protection as ctx_switch_func().
 */
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
                  struct task_struct *next)
{
        if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
                tracing_record_cmdline(prev);

        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        ctx_switch_func(__rq, prev, next);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_switch(prev, next);
}

void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
                    struct task_struct *curr)
{
        wakeup_func(__rq, wakee, curr);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_wakeup(wakee, curr);
}

/*
 * Log three arbitrary values as a "special" entry in the trace buffer,
 * using the same per-cpu recursion protection as the callbacks above.
 */
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

/* Reset the trace start time and every online CPU's buffer. */
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}

static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        atomic_inc(&trace_record_cmdline_enabled);
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        atomic_dec(&trace_record_cmdline_enabled);
        tracer_enabled = 0;
}

static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
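
/*
 * Usage sketch (an assumption based on the ftrace debugfs interface of
 * this kernel generation; the mount point and file names may differ on
 * other versions): the tracer registered above is selected and toggled
 * from userspace roughly like this:
 *
 *	# mount -t debugfs nodev /debug
 *	# echo sched_switch > /debug/tracing/current_tracer
 *	# echo 1 > /debug/tracing/tracing_enabled
 *	... run the workload to be traced ...
 *	# echo 0 > /debug/tracing/tracing_enabled
 *	# cat /debug/tracing/trace
 *
 * Writing to tracing_enabled flips tr->ctrl, which reaches this file
 * through sched_switch_trace_ctrl_update().
 */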