/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"
static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
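
/*
 * Illustrative sketch of the sequence-count pattern described above,
 * with hypothetical helpers (in the real code the snapshot lives in
 * the per-CPU trace_array_cpu->critical_sequence): a measurement
 * snapshots max_sequence when it starts; if another CPU records a
 * maximum in the meantime, the snapshot no longer matches and the
 * measurement is discarded rather than trusted:
 */
#if 0
static unsigned long seq_snapshot;

static void measurement_begin(void)
{
	seq_snapshot = max_sequence;	/* remember the sequence we started in */
}

static int measurement_still_valid(void)
{
	/* a mismatch means someone else reported a maximum meanwhile */
	return seq_snapshot == max_sequence;
}
#endif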
#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (likely(!tracer_enabled))
		return;

	local_save_flags(flags);
	if (!irqs_disabled_flags(flags))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* avoid recursing into ourselves: record only as the sole user */
	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */
/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
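
/*
 * Worked example (illustrative numbers only): with tracing_thresh set
 * to 100 us, a 150 us section is reported even if a larger maximum was
 * seen earlier; with tracing_thresh unset, only a delta that beats
 * tracing_max_latency is recorded as the new worst case.
 */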
static void notrace
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, T2, delta;
	unsigned long flags;
	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
	/*
	 * Update the timestamp, because the trace entry above
	 * might change it (it can only get larger so the latency
	 * is fair to be reported):
	 */
	T2 = now(cpu);
	delta = T2 - T0;

	latency = nsecs_to_usecs(delta);
	if (data->critical_sequence != max_sequence)
		goto out;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;
	update_max_tr_single(tr, current, cpu);

	if (tracing_thresh)
		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
		       "violates %lu us threshold.\n"
		       " => started at timestamp %lu: ",
		       current->comm, current->pid,
		       raw_smp_processor_id(),
		       latency, nsecs_to_usecs(tracing_thresh), t0);
	else
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " new %lu us maximum-latency "
		       "critical section.\n => started at timestamp %lu: ",
		       current->comm, current->pid,
		       raw_smp_processor_id(),
		       latency, t0);

	print_symbol(KERN_CONT "<%s>\n", data->critical_start);
	printk(KERN_CONT " => ended at timestamp %lu: ", t1);
	print_symbol(KERN_CONT "<%s>\n", data->critical_end);
	dump_stack();
	t1 = nsecs_to_usecs(now(cpu));
	printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);

	max_sequence++;

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}
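
/*
 * Timeline of the measurement above (illustrative):
 *
 *	T0 - irqs were disabled, preempt_timestamp was recorded
 *	T1 - irqs are about to be re-enabled; delta = T1 - T0 is
 *	     checked by report_latency()
 *	T2 - taken again after logging the trace entry, so the
 *	     reported latency also covers the entry we just wrote
 *	     (it can only grow, which keeps the report fair)
 */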
static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	data->critical_start = parent_ip;

	local_save_flags(flags);
	ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}
static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);
	local_save_flags(flags);
	ftrace(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
void notrace start_critical_timings(void)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
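
/*
 * Usage sketch (hypothetical caller, not from this file): code that
 * deliberately sits with interrupts disabled - an idle loop polling
 * for work, say - can bracket that window so the wait itself is not
 * reported as an irqs-off latency. poll_for_work() is made up:
 */
#if 0
static void idle_poll(void)
{
	local_irq_disable();
	stop_critical_timings();	/* don't count the idle wait */
	while (!poll_for_work())	/* hypothetical wait condition */
		cpu_relax();
	start_critical_timings();	/* timing resumes from here */
	local_irq_enable();
}
#endif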
#ifdef CONFIG_PROVE_LOCKING
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		start_critical_timing(a0, a1);
}
#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs for the lockdep hooks we do not need here:
 */
void early_boot_irqs_off(void) { }
void early_boot_irqs_on(void) { }
void trace_softirqs_on(unsigned long ip) { }
void trace_softirqs_off(unsigned long ip) { }
inline void print_irqtrace_events(struct task_struct *curr) { }
/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
	unsigned long flags;

	local_save_flags(flags);
	if (irqs_disabled_flags(flags))
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_PROVE_LOCKING */
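
/*
 * For orientation, roughly how the hooks above get invoked: with
 * CONFIG_TRACE_IRQFLAGS enabled, include/linux/irqflags.h wraps the
 * raw interrupt primitives along these lines (paraphrased, not a
 * verbatim copy of that header):
 */
#if 0
#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#endif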
static void start_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 1;
	register_ftrace_function(&trace_ops);
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	unregister_ftrace_function(&trace_ops);
	tracer_enabled = 0;
}
static void irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}
static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}
static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
};
__init static int init_irqsoff_tracer(void)
{
	register_tracer(&irqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
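
/*
 * Usage sketch from userspace (assuming the debugfs ftrace layout of
 * this era; mount point and file names may vary by configuration):
 *
 *	# mount -t debugfs nodev /debug
 *	# echo irqsoff > /debug/tracing/current_tracer
 *	... run the workload ...
 *	# cat /debug/tracing/latency_trace
 *
 * tracing_max_latency holds the worst-case latency seen so far and can
 * be reset by writing a 0 to it; setting tracing_thresh switches
 * report_latency() into threshold mode.
 */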