#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>
#include <trace/power.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,

	__TRACE_LAST_TYPE,
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ent	graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;
};

extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */
#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * trace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			depth;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};

struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};

struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

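/*
 * Example (an illustrative sketch, not part of the original header):
 * output code can decode these bits from a recorded entry's flags
 * field, e.g. when printing the hardirq/softirq latency columns.
 * The helper name is hypothetical.
 */
static inline int trace_entry_in_hardirq(struct trace_entry *entry)
{
	/* true if the event fired inside a hard interrupt handler */
	return !!(entry->flags & TRACE_FLAG_HARDIRQ);
}
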
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.):
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		__ftrace_bad_type();					\
	} while (0)

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

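/*
 * Example (an illustrative sketch, not part of the original header):
 * how a print_line callback narrows the generic entry with
 * trace_assign_type() and reports its result with the codes above.
 * A real callback would go on to format the fields into iter->seq;
 * the helper name and out-parameters here are hypothetical.
 */
static inline enum print_line_t
example_narrow_fn_entry(struct trace_entry *ent,
			unsigned long *ip, unsigned long *parent_ip)
{
	struct ftrace_entry *field;

	if (ent->type != TRACE_FN)
		return TRACE_TYPE_UNHANDLED;	/* let another printer try */

	/* verified assignment: WARN_ON()s if type and id disagree */
	trace_assign_type(field, ent);

	*ip = field->ip;
	*parent_ip = field->parent_ip;
	return TRACE_TYPE_HANDLED;
}
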
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit mask that sets its value in the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

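/*
 * Example (an illustrative sketch, not part of the original header):
 * how a tracer typically wires up its private options in its own .c
 * file.  The "example_opts"/"example_flags" names and the 0x1 bit
 * are hypothetical.
 */
#define EXAMPLE_OPT_VERBOSE	0x1	/* hypothetical option bit */

static struct tracer_opt example_opts[] = {
	/* "verbose" will show up in the trace_options file */
	{ TRACER_OPT(verbose, EXAMPLE_OPT_VERBOSE) },
	{ }	/* the list is terminated by an empty entry */
};

static struct tracer_flags example_flags = {
	.val	= 0,		/* initial value of the option bits */
	.opts	= example_opts,
};
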
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
	struct tracer_stat	*stats;
};

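/*
 * Example (an illustrative sketch, not part of the original header):
 * the smallest useful tracer, as it would appear in its own .c file
 * that includes this header.  Only name and init are filled in; the
 * remaining callbacks may stay NULL.  The "example" tracer name and
 * function names are hypothetical.
 */
static int example_tracer_init(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);	/* start from an empty buffer */
	return 0;
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",	/* selected via current_tracer */
	.init	= example_tracer_init,
};

static __init int init_example_tracer(void)
{
	/* makes the tracer show up in available_tracers */
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
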
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

static inline void
trace_seq_init(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

#define TRACE_PIPE_ALL_CPU	-1

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; those routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    unsigned char type,
						    unsigned long len,
						    unsigned long flags,
						    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t	func;
	void			*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

extern char *trace_find_cmdline(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x40000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

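/*
 * Example (an illustrative sketch, not part of the original header):
 * output code tests these bits on the global trace_flags, e.g. to
 * decide how a kernel symbol should be printed.  The helper name is
 * hypothetical.
 */
static inline int example_print_sym_addresses(void)
{
	/* true when any symbol-formatting option is enabled */
	return !!(trace_flags & TRACE_ITER_SYM_MASK);
}
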
extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we are either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

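/*
 * Example (an illustrative sketch, not part of the original header):
 * a function-trace callback pairing the two helpers above, so that
 * re-enabling preemption never triggers a reschedule from within the
 * tracer itself.  The callback name and body are hypothetical.
 */
static inline void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	resched = ftrace_preempt_disable();
	/* ... record the event here, e.g. via trace_function() ... */
	ftrace_preempt_enable(resched);
}
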
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

struct ftrace_event_call {
	char		*name;
	char		*system;
	struct dentry	*dir;
	int		enabled;
	int		(*regfunc)(void);
	void		(*unregfunc)(void);
	int		id;
	int		(*raw_init)(void);
	int		(*show_format)(struct trace_seq *s);
};

void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

#endif /* _LINUX_KERNEL_TRACE_H */