2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
31 #include <asm/ftrace.h>
35 /* ftrace_enabled is a switch to turn ftrace on or off */
36 int ftrace_enabled __read_mostly;
37 static int last_ftrace_enabled;
40 * ftrace_disabled is set when an anomaly is discovered.
41 * ftrace_disabled is much stronger than ftrace_enabled.
43 static int ftrace_disabled __read_mostly;
45 static DEFINE_SPINLOCK(ftrace_lock);
46 static DEFINE_MUTEX(ftrace_sysctl_lock);
48 static struct ftrace_ops ftrace_list_end __read_mostly =
53 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
54 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
56 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
58 struct ftrace_ops *op = ftrace_list;
60 /* in case someone actually ports this to alpha! */
61 read_barrier_depends();
63 while (op != &ftrace_list_end) {
65 read_barrier_depends();
66 op->func(ip, parent_ip);
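/*
 * ftrace_trace_function is what the architecture's mcount stub calls.
 * With a single registered ftrace_ops it points directly at that ops'
 * func; with more than one it points at ftrace_list_func() above, which
 * walks the whole chain.  A rough sketch of the dispatch (the ops names
 * "a" and "b" are purely illustrative):
 *
 *	mcount
 *	  -> ftrace_trace_function == ftrace_list_func
 *	       a.func(ip, parent_ip);
 *	       b.func(ip, parent_ip);
 */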
72 * clear_ftrace_function - reset the ftrace function
74 * This NULLs the ftrace function and in essence stops
75 * tracing. There may be a lag before tracing actually stops.
77 void clear_ftrace_function(void)
79 ftrace_trace_function = ftrace_stub;
82 static int __register_ftrace_function(struct ftrace_ops *ops)
84 /* should not be called from interrupt context */
85 spin_lock(&ftrace_lock);
87 ops->next = ftrace_list;
89 * We are entering ops into the ftrace_list but another
90 * CPU might be walking that list. We need to make sure
91 * the ops->next pointer is valid before another CPU sees
92 * the ops pointer included in the ftrace_list.
99 * For one func, simply call it directly.
100 * For more than one func, call the chain.
102 if (ops->next == &ftrace_list_end)
103 ftrace_trace_function = ops->func;
105 ftrace_trace_function = ftrace_list_func;
108 spin_unlock(&ftrace_lock);
113 static int __unregister_ftrace_function(struct ftrace_ops *ops)
115 struct ftrace_ops **p;
118 /* should not be called from interrupt context */
119 spin_lock(&ftrace_lock);
122 * If we are removing the last function, then simply point
123 * to the ftrace_stub.
125 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
126 ftrace_trace_function = ftrace_stub;
127 ftrace_list = &ftrace_list_end;
131 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
142 if (ftrace_enabled) {
143 /* If we only have one func left, then call that directly */
144 if (ftrace_list == &ftrace_list_end ||
145 ftrace_list->next == &ftrace_list_end)
146 ftrace_trace_function = ftrace_list->func;
150 spin_unlock(&ftrace_lock);
155 #ifdef CONFIG_DYNAMIC_FTRACE
157 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
159 * The hash lock is only needed when the recording of the mcount
160 * callers is dynamic. That is, recorded by the callers themselves and
161 * not at compile time.
163 static DEFINE_SPINLOCK(ftrace_hash_lock);
164 #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
165 #define ftrace_hash_unlock(flags) \
166 spin_unlock_irqrestore(&ftrace_hash_lock, flags)
167 static void ftrace_release_hash(unsigned long start, unsigned long end);
169 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
170 #define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
171 #define ftrace_hash_unlock(flags) do { } while (0)
172 static inline void ftrace_release_hash(unsigned long start, unsigned long end)
178 * Since MCOUNT_ADDR may point to mcount itself, we do not want
179 * to get it confused by reading a reference in the code as we
180 * are parsing objcopy output of the text section. Use a variable for it instead.
183 static unsigned long mcount_addr = MCOUNT_ADDR;
185 static struct task_struct *ftraced_task;
188 FTRACE_ENABLE_CALLS = (1 << 0),
189 FTRACE_DISABLE_CALLS = (1 << 1),
190 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
191 FTRACE_ENABLE_MCOUNT = (1 << 3),
192 FTRACE_DISABLE_MCOUNT = (1 << 4),
195 static int ftrace_filtered;
196 static int tracing_on;
197 static int frozen_record_count;
199 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
201 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
203 static DEFINE_MUTEX(ftraced_lock);
204 static DEFINE_MUTEX(ftrace_regex_lock);
207 struct ftrace_page *next;
209 struct dyn_ftrace records[];
212 #define ENTRIES_PER_PAGE \
213 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
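/*
 * Rough sizing illustration (numbers depend on the architecture and on
 * the exact layout of struct dyn_ftrace, so treat them as an estimate
 * only): with 4 KB pages and a record of a few dozen bytes, a single
 * ftrace_page holds on the order of a hundred or more records, so the
 * NR_TO_INIT estimate below works out to a few dozen pages at boot.
 */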
215 /* estimate from running different kernels */
216 #define NR_TO_INIT 10000
218 static struct ftrace_page *ftrace_pages_start;
219 static struct ftrace_page *ftrace_pages;
221 static int ftraced_trigger;
222 static int ftraced_suspend;
223 static int ftraced_stop;
225 static int ftrace_record_suspend;
227 static struct dyn_ftrace *ftrace_free_records;
230 #ifdef CONFIG_KPROBES
231 static inline void freeze_record(struct dyn_ftrace *rec)
233 if (!(rec->flags & FTRACE_FL_FROZEN)) {
234 rec->flags |= FTRACE_FL_FROZEN;
235 frozen_record_count++;
239 static inline void unfreeze_record(struct dyn_ftrace *rec)
241 if (rec->flags & FTRACE_FL_FROZEN) {
242 rec->flags &= ~FTRACE_FL_FROZEN;
243 frozen_record_count--;
247 static inline int record_frozen(struct dyn_ftrace *rec)
249 return rec->flags & FTRACE_FL_FROZEN;
252 # define freeze_record(rec) ({ 0; })
253 # define unfreeze_record(rec) ({ 0; })
254 # define record_frozen(rec) ({ 0; })
255 #endif /* CONFIG_KPROBES */
257 int skip_trace(unsigned long ip)
260 struct dyn_ftrace *rec;
261 struct hlist_node *t;
262 struct hlist_head *head;
264 if (frozen_record_count == 0)
267 head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
268 hlist_for_each_entry_rcu(rec, t, head, node) {
270 if (record_frozen(rec)) {
271 if (rec->flags & FTRACE_FL_FAILED)
274 if (!(rec->flags & FTRACE_FL_CONVERTED))
277 if (!tracing_on || !ftrace_enabled)
280 if (ftrace_filtered) {
281 fl = rec->flags & (FTRACE_FL_FILTER |
283 if (!fl || (fl & FTRACE_FL_NOTRACE))
295 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
297 struct dyn_ftrace *p;
298 struct hlist_node *t;
301 hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
312 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
314 hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
317 /* called from kstop_machine */
318 static inline void ftrace_del_hash(struct dyn_ftrace *node)
320 hlist_del(&node->node);
323 static void ftrace_free_rec(struct dyn_ftrace *rec)
325 rec->ip = (unsigned long)ftrace_free_records;
326 ftrace_free_records = rec;
327 rec->flags |= FTRACE_FL_FREE;
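/*
 * Freed records form a simple singly linked free list: ftrace_free_rec()
 * reuses the record's ip field as the "next free" pointer, and
 * ftrace_alloc_dyn_node() pops records off this list before taking new
 * slots from the ftrace_pages pool.
 */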
330 void ftrace_release(void *start, unsigned long size)
332 struct dyn_ftrace *rec;
333 struct ftrace_page *pg;
334 unsigned long s = (unsigned long)start;
335 unsigned long e = s + size;
338 if (ftrace_disabled || !start)
341 /* should not be called from interrupt context */
342 spin_lock(&ftrace_lock);
344 for (pg = ftrace_pages_start; pg; pg = pg->next) {
345 for (i = 0; i < pg->index; i++) {
346 rec = &pg->records[i];
348 if ((rec->ip >= s) && (rec->ip < e))
349 ftrace_free_rec(rec);
352 spin_unlock(&ftrace_lock);
354 ftrace_release_hash(s, e);
357 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
359 struct dyn_ftrace *rec;
361 /* First check for freed records */
362 if (ftrace_free_records) {
363 rec = ftrace_free_records;
365 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
367 ftrace_free_records = NULL;
373 ftrace_free_records = (void *)rec->ip;
374 memset(rec, 0, sizeof(*rec));
378 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
379 if (!ftrace_pages->next)
381 ftrace_pages = ftrace_pages->next;
384 return &ftrace_pages->records[ftrace_pages->index++];
388 ftrace_record_ip(unsigned long ip)
390 struct dyn_ftrace *node;
396 if (!ftrace_enabled || ftrace_disabled)
399 resched = need_resched();
400 preempt_disable_notrace();
403 * We simply need to protect against recursion.
404 * Use the raw version of smp_processor_id and not
405 * __get_cpu_var which can call debug hooks that can
406 * cause a recursive crash here.
408 cpu = raw_smp_processor_id();
409 per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
410 if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
413 if (unlikely(ftrace_record_suspend))
416 key = hash_long(ip, FTRACE_HASHBITS);
418 WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
420 if (ftrace_ip_in_hash(ip, key))
423 ftrace_hash_lock(flags);
425 /* This ip may have hit the hash before the lock */
426 if (ftrace_ip_in_hash(ip, key))
429 node = ftrace_alloc_dyn_node(ip);
435 ftrace_add_hash(node, key);
440 ftrace_hash_unlock(flags);
442 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
444 /* prevent recursion with scheduler */
446 preempt_enable_no_resched_notrace();
448 preempt_enable_notrace();
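/*
 * Recursion protection in ftrace_record_ip() above: nearly everything it
 * calls may itself be compiled with mcount, so a per-cpu counter
 * (ftrace_shutdown_disable_cpu) is incremented first and the body only
 * runs when the count is exactly 1, with preemption disabled so the
 * counter stays on the same CPU for the duration.
 */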
451 #define FTRACE_ADDR ((long)(ftrace_caller))
454 __ftrace_replace_code(struct dyn_ftrace *rec,
455 unsigned char *old, unsigned char *new, int enable)
457 unsigned long ip, fl;
461 if (ftrace_filtered && enable) {
463 * If filtering is on:
465 * If this record is set to be filtered and
466 * is enabled then do nothing.
468 * If this record is set to be filtered and
469 * it is not enabled, enable it.
471 * If this record is not set to be filtered
472 * and it is not enabled, do nothing.
474 * If this record is set not to trace, then do nothing.
477 * If this record is set not to trace and
478 * it is enabled then disable it.
480 * If this record is not set to be filtered and
481 * it is enabled, disable it.
484 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
487 if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
488 (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
489 !fl || (fl == FTRACE_FL_NOTRACE))
493 * If it is enabled disable it,
494 * otherwise enable it!
496 if (fl & FTRACE_FL_ENABLED) {
497 /* swap new and old */
499 old = ftrace_call_replace(ip, FTRACE_ADDR);
500 rec->flags &= ~FTRACE_FL_ENABLED;
502 new = ftrace_call_replace(ip, FTRACE_ADDR);
503 rec->flags |= FTRACE_FL_ENABLED;
509 * If this record is set not to trace and is
510 * not enabled, do nothing.
512 fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
513 if (fl == FTRACE_FL_NOTRACE)
516 new = ftrace_call_replace(ip, FTRACE_ADDR);
518 old = ftrace_call_replace(ip, FTRACE_ADDR);
521 if (rec->flags & FTRACE_FL_ENABLED)
523 rec->flags |= FTRACE_FL_ENABLED;
525 if (!(rec->flags & FTRACE_FL_ENABLED))
527 rec->flags &= ~FTRACE_FL_ENABLED;
531 return ftrace_modify_code(ip, old, new);
534 static void ftrace_replace_code(int enable)
537 unsigned char *new = NULL, *old = NULL;
538 struct dyn_ftrace *rec;
539 struct ftrace_page *pg;
542 old = ftrace_nop_replace();
544 new = ftrace_nop_replace();
546 for (pg = ftrace_pages_start; pg; pg = pg->next) {
547 for (i = 0; i < pg->index; i++) {
548 rec = &pg->records[i];
550 /* don't modify code that has already faulted */
551 if (rec->flags & FTRACE_FL_FAILED)
554 /* ignore updates to this record's mcount site */
555 if (get_kprobe((void *)rec->ip)) {
559 unfreeze_record(rec);
562 failed = __ftrace_replace_code(rec, old, new, enable);
563 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
564 rec->flags |= FTRACE_FL_FAILED;
565 if ((system_state == SYSTEM_BOOTING) ||
566 !core_kernel_text(rec->ip)) {
567 ftrace_del_hash(rec);
568 ftrace_free_rec(rec);
575 static void ftrace_shutdown_replenish(void)
577 if (ftrace_pages->next)
580 /* allocate another page */
581 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
584 static void print_ip_ins(const char *fmt, unsigned char *p)
588 printk(KERN_CONT "%s", fmt);
590 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
591 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
595 ftrace_code_disable(struct dyn_ftrace *rec)
598 unsigned char *nop, *call;
603 nop = ftrace_nop_replace();
604 call = ftrace_call_replace(ip, mcount_addr);
606 ret = ftrace_modify_code(ip, call, nop);
611 pr_info("ftrace faulted on modifying ");
616 pr_info("ftrace failed to modify ");
618 print_ip_ins(" expected: ", call);
619 print_ip_ins(" actual: ", (unsigned char *)ip);
620 print_ip_ins(" replace: ", nop);
621 printk(KERN_CONT "\n");
625 pr_info("ftrace faulted on writing ");
630 pr_info("ftrace faulted on unknown error ");
634 rec->flags |= FTRACE_FL_FAILED;
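/*
 * On failure the messages above dump the instruction bytes involved,
 * along the lines of:
 *
 *	 expected: <bytes of the mcount call> actual: <bytes found at the site> replace: <the nop bytes>
 *
 * and the record is flagged FTRACE_FL_FAILED so it is never patched again.
 */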
640 static int __ftrace_update_code(void *ignore);
642 static int __ftrace_modify_code(void *data)
647 if (*command & FTRACE_ENABLE_CALLS) {
649 * Update any recorded ips now that we have the machine stopped.
652 __ftrace_update_code(NULL);
653 ftrace_replace_code(1);
655 } else if (*command & FTRACE_DISABLE_CALLS) {
656 ftrace_replace_code(0);
660 if (*command & FTRACE_UPDATE_TRACE_FUNC)
661 ftrace_update_ftrace_func(ftrace_trace_function);
663 if (*command & FTRACE_ENABLE_MCOUNT) {
664 addr = (unsigned long)ftrace_record_ip;
665 ftrace_mcount_set(&addr);
666 } else if (*command & FTRACE_DISABLE_MCOUNT) {
667 addr = (unsigned long)ftrace_stub;
668 ftrace_mcount_set(&addr);
674 static void ftrace_run_update_code(int command)
676 stop_machine(__ftrace_modify_code, &command, NULL);
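/*
 * The FTRACE_* values above are bit flags and may be OR'd together so
 * that a single stop_machine() pass does all the work.  For example,
 * ftrace_startup() below can end up issuing:
 *
 *	ftrace_run_update_code(FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC);
 */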
679 void ftrace_disable_daemon(void)
681 /* Stop the daemon from calling kstop_machine */
682 mutex_lock(&ftraced_lock);
684 mutex_unlock(&ftraced_lock);
686 ftrace_force_update();
689 void ftrace_enable_daemon(void)
691 mutex_lock(&ftraced_lock);
693 mutex_unlock(&ftraced_lock);
695 ftrace_force_update();
698 static ftrace_func_t saved_ftrace_func;
700 static void ftrace_startup(void)
704 if (unlikely(ftrace_disabled))
707 mutex_lock(&ftraced_lock);
709 if (ftraced_suspend == 1)
710 command |= FTRACE_ENABLE_CALLS;
712 if (saved_ftrace_func != ftrace_trace_function) {
713 saved_ftrace_func = ftrace_trace_function;
714 command |= FTRACE_UPDATE_TRACE_FUNC;
717 if (!command || !ftrace_enabled)
720 ftrace_run_update_code(command);
722 mutex_unlock(&ftraced_lock);
725 static void ftrace_shutdown(void)
729 if (unlikely(ftrace_disabled))
732 mutex_lock(&ftraced_lock);
734 if (!ftraced_suspend)
735 command |= FTRACE_DISABLE_CALLS;
737 if (saved_ftrace_func != ftrace_trace_function) {
738 saved_ftrace_func = ftrace_trace_function;
739 command |= FTRACE_UPDATE_TRACE_FUNC;
742 if (!command || !ftrace_enabled)
745 ftrace_run_update_code(command);
747 mutex_unlock(&ftraced_lock);
750 static void ftrace_startup_sysctl(void)
752 int command = FTRACE_ENABLE_MCOUNT;
754 if (unlikely(ftrace_disabled))
757 mutex_lock(&ftraced_lock);
758 /* Force update next time */
759 saved_ftrace_func = NULL;
760 /* ftraced_suspend is true if we want ftrace running */
762 command |= FTRACE_ENABLE_CALLS;
764 ftrace_run_update_code(command);
765 mutex_unlock(&ftraced_lock);
768 static void ftrace_shutdown_sysctl(void)
770 int command = FTRACE_DISABLE_MCOUNT;
772 if (unlikely(ftrace_disabled))
775 mutex_lock(&ftraced_lock);
776 /* ftraced_suspend is true if ftrace is running */
778 command |= FTRACE_DISABLE_CALLS;
780 ftrace_run_update_code(command);
781 mutex_unlock(&ftraced_lock);
784 static cycle_t ftrace_update_time;
785 static unsigned long ftrace_update_cnt;
786 unsigned long ftrace_update_tot_cnt;
788 static int __ftrace_update_code(void *ignore)
790 int i, save_ftrace_enabled;
792 struct dyn_ftrace *p;
793 struct hlist_node *t, *n;
794 struct hlist_head *head, temp_list;
796 /* Don't be recording funcs now */
797 ftrace_record_suspend++;
798 save_ftrace_enabled = ftrace_enabled;
801 start = ftrace_now(raw_smp_processor_id());
802 ftrace_update_cnt = 0;
804 /* No locks needed, the machine is stopped! */
805 for (i = 0; i < FTRACE_HASHSIZE; i++) {
806 INIT_HLIST_HEAD(&temp_list);
807 head = &ftrace_hash[i];
809 /* all CPUs are stopped, we are safe to modify code */
810 hlist_for_each_entry_safe(p, t, n, head, node) {
811 /* Skip over failed records which have not been converted. */
813 if (p->flags & FTRACE_FL_FAILED)
816 /* Unconverted records are always at the head of the
817 * hash bucket. Once we encounter a converted record,
818 * simply skip over to the next bucket. Saves ftraced
819 * some processor cycles (ftrace does its bit for
820 * global warming :-p ). */
821 if (p->flags & (FTRACE_FL_CONVERTED))
824 /* Ignore updates to this record's mcount site.
825 * Reintroduce this record at the head of this
826 * bucket to attempt to "convert" it again if
827 * the kprobe on it is unregistered before the next run. */
829 if (get_kprobe((void *)p->ip)) {
831 INIT_HLIST_NODE(&p->node);
832 hlist_add_head(&p->node, &temp_list);
839 /* convert record (i.e., patch the mcount call with a NOP) */
840 if (ftrace_code_disable(p)) {
841 p->flags |= FTRACE_FL_CONVERTED;
844 if ((system_state == SYSTEM_BOOTING) ||
845 !core_kernel_text(p->ip)) {
852 hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
854 INIT_HLIST_NODE(&p->node);
855 hlist_add_head(&p->node, head);
859 stop = ftrace_now(raw_smp_processor_id());
860 ftrace_update_time = stop - start;
861 ftrace_update_tot_cnt += ftrace_update_cnt;
864 ftrace_enabled = save_ftrace_enabled;
865 ftrace_record_suspend--;
870 static int ftrace_update_code(void)
872 if (unlikely(ftrace_disabled) ||
873 !ftrace_enabled || !ftraced_trigger)
876 stop_machine(__ftrace_update_code, NULL, NULL);
881 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
883 struct ftrace_page *pg;
887 /* allocate a few pages */
888 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
889 if (!ftrace_pages_start)
893 * Allocate a few more pages.
895 * TODO: have some parser search vmlinux before
896 * final linking to find all calls to ftrace.
898 * a) know how many pages to allocate.
900 * b) set up the table then.
902 * The dynamic code is still necessary for modules.
906 pg = ftrace_pages = ftrace_pages_start;
908 cnt = num_to_init / ENTRIES_PER_PAGE;
909 pr_info("ftrace: allocating %ld hash entries in %d pages\n",
912 for (i = 0; i < cnt; i++) {
913 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
915 /* If we fail, we'll try later anyway */
926 FTRACE_ITER_FILTER = (1 << 0),
927 FTRACE_ITER_CONT = (1 << 1),
928 FTRACE_ITER_NOTRACE = (1 << 2),
929 FTRACE_ITER_FAILURES = (1 << 3),
932 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
934 struct ftrace_iterator {
936 struct ftrace_page *pg;
939 unsigned char buffer[FTRACE_BUFF_MAX+1];
945 t_next(struct seq_file *m, void *v, loff_t *pos)
947 struct ftrace_iterator *iter = m->private;
948 struct dyn_ftrace *rec = NULL;
952 /* should not be called from interrupt context */
953 spin_lock(&ftrace_lock);
955 if (iter->idx >= iter->pg->index) {
956 if (iter->pg->next) {
957 iter->pg = iter->pg->next;
962 rec = &iter->pg->records[iter->idx++];
963 if ((rec->flags & FTRACE_FL_FREE) ||
965 (!(iter->flags & FTRACE_ITER_FAILURES) &&
966 (rec->flags & FTRACE_FL_FAILED)) ||
968 ((iter->flags & FTRACE_ITER_FAILURES) &&
969 !(rec->flags & FTRACE_FL_FAILED)) ||
971 ((iter->flags & FTRACE_ITER_NOTRACE) &&
972 !(rec->flags & FTRACE_FL_NOTRACE))) {
977 spin_unlock(&ftrace_lock);
984 static void *t_start(struct seq_file *m, loff_t *pos)
986 struct ftrace_iterator *iter = m->private;
990 if (*pos != iter->pos) {
991 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
995 p = t_next(m, p, &l);
1001 static void t_stop(struct seq_file *m, void *p)
1005 static int t_show(struct seq_file *m, void *v)
1007 struct dyn_ftrace *rec = v;
1008 char str[KSYM_SYMBOL_LEN];
1013 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1015 seq_printf(m, "%s\n", str);
1020 static struct seq_operations show_ftrace_seq_ops = {
1028 ftrace_avail_open(struct inode *inode, struct file *file)
1030 struct ftrace_iterator *iter;
1033 if (unlikely(ftrace_disabled))
1036 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1040 iter->pg = ftrace_pages_start;
1043 ret = seq_open(file, &show_ftrace_seq_ops);
1045 struct seq_file *m = file->private_data;
1055 int ftrace_avail_release(struct inode *inode, struct file *file)
1057 struct seq_file *m = (struct seq_file *)file->private_data;
1058 struct ftrace_iterator *iter = m->private;
1060 seq_release(inode, file);
1067 ftrace_failures_open(struct inode *inode, struct file *file)
1071 struct ftrace_iterator *iter;
1073 ret = ftrace_avail_open(inode, file);
1075 m = (struct seq_file *)file->private_data;
1076 iter = (struct ftrace_iterator *)m->private;
1077 iter->flags = FTRACE_ITER_FAILURES;
1084 static void ftrace_filter_reset(int enable)
1086 struct ftrace_page *pg;
1087 struct dyn_ftrace *rec;
1088 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1091 /* should not be called from interrupt context */
1092 spin_lock(&ftrace_lock);
1094 ftrace_filtered = 0;
1095 pg = ftrace_pages_start;
1097 for (i = 0; i < pg->index; i++) {
1098 rec = &pg->records[i];
1099 if (rec->flags & FTRACE_FL_FAILED)
1101 rec->flags &= ~type;
1105 spin_unlock(&ftrace_lock);
1109 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1111 struct ftrace_iterator *iter;
1114 if (unlikely(ftrace_disabled))
1117 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1121 mutex_lock(&ftrace_regex_lock);
1122 if ((file->f_mode & FMODE_WRITE) &&
1123 !(file->f_flags & O_APPEND))
1124 ftrace_filter_reset(enable);
1126 if (file->f_mode & FMODE_READ) {
1127 iter->pg = ftrace_pages_start;
1129 iter->flags = enable ? FTRACE_ITER_FILTER :
1130 FTRACE_ITER_NOTRACE;
1132 ret = seq_open(file, &show_ftrace_seq_ops);
1134 struct seq_file *m = file->private_data;
1139 file->private_data = iter;
1140 mutex_unlock(&ftrace_regex_lock);
1146 ftrace_filter_open(struct inode *inode, struct file *file)
1148 return ftrace_regex_open(inode, file, 1);
1152 ftrace_notrace_open(struct inode *inode, struct file *file)
1154 return ftrace_regex_open(inode, file, 0);
1158 ftrace_regex_read(struct file *file, char __user *ubuf,
1159 size_t cnt, loff_t *ppos)
1161 if (file->f_mode & FMODE_READ)
1162 return seq_read(file, ubuf, cnt, ppos);
1168 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1172 if (file->f_mode & FMODE_READ)
1173 ret = seq_lseek(file, offset, origin);
1175 file->f_pos = ret = 1;
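/*
 * ftrace_match() below implements simple glob matching for the strings
 * written to set_ftrace_filter / set_ftrace_notrace.  Illustrative
 * patterns (the function names are examples only):
 *
 *	schedule	MATCH_FULL        - the exact symbol name
 *	sched*		MATCH_FRONT_ONLY  - symbols starting with "sched"
 *	*lock		MATCH_END_ONLY    - symbols ending in "lock"
 *	*rcu*		MATCH_MIDDLE_ONLY - symbols containing "rcu"
 */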
1188 ftrace_match(unsigned char *buff, int len, int enable)
1190 char str[KSYM_SYMBOL_LEN];
1191 char *search = NULL;
1192 struct ftrace_page *pg;
1193 struct dyn_ftrace *rec;
1194 int type = MATCH_FULL;
1195 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1196 unsigned i, match = 0, search_len = 0;
1198 for (i = 0; i < len; i++) {
1199 if (buff[i] == '*') {
1201 search = buff + i + 1;
1202 type = MATCH_END_ONLY;
1203 search_len = len - (i + 1);
1205 if (type == MATCH_END_ONLY) {
1206 type = MATCH_MIDDLE_ONLY;
1209 type = MATCH_FRONT_ONLY;
1217 /* should not be called from interrupt context */
1218 spin_lock(&ftrace_lock);
1220 ftrace_filtered = 1;
1221 pg = ftrace_pages_start;
1223 for (i = 0; i < pg->index; i++) {
1227 rec = &pg->records[i];
1228 if (rec->flags & FTRACE_FL_FAILED)
1230 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1233 if (strcmp(str, buff) == 0)
1236 case MATCH_FRONT_ONLY:
1237 if (memcmp(str, buff, match) == 0)
1240 case MATCH_MIDDLE_ONLY:
1241 if (strstr(str, search))
1244 case MATCH_END_ONLY:
1245 ptr = strstr(str, search);
1246 if (ptr && (ptr[search_len] == 0))
1255 spin_unlock(&ftrace_lock);
1259 ftrace_regex_write(struct file *file, const char __user *ubuf,
1260 size_t cnt, loff_t *ppos, int enable)
1262 struct ftrace_iterator *iter;
1267 if (!cnt || cnt < 0)
1270 mutex_lock(&ftrace_regex_lock);
1272 if (file->f_mode & FMODE_READ) {
1273 struct seq_file *m = file->private_data;
1276 iter = file->private_data;
1279 iter->flags &= ~FTRACE_ITER_CONT;
1280 iter->buffer_idx = 0;
1283 ret = get_user(ch, ubuf++);
1289 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1290 /* skip white space */
1291 while (cnt && isspace(ch)) {
1292 ret = get_user(ch, ubuf++);
1300 file->f_pos += read;
1305 iter->buffer_idx = 0;
1308 while (cnt && !isspace(ch)) {
1309 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1310 iter->buffer[iter->buffer_idx++] = ch;
1315 ret = get_user(ch, ubuf++);
1324 iter->buffer[iter->buffer_idx] = 0;
1325 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1326 iter->buffer_idx = 0;
1328 iter->flags |= FTRACE_ITER_CONT;
1331 file->f_pos += read;
1335 mutex_unlock(&ftrace_regex_lock);
1341 ftrace_filter_write(struct file *file, const char __user *ubuf,
1342 size_t cnt, loff_t *ppos)
1344 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1348 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1349 size_t cnt, loff_t *ppos)
1351 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1355 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1357 if (unlikely(ftrace_disabled))
1360 mutex_lock(&ftrace_regex_lock);
1362 ftrace_filter_reset(enable);
1364 ftrace_match(buf, len, enable);
1365 mutex_unlock(&ftrace_regex_lock);
1369 * ftrace_set_filter - set a function to filter on in ftrace
1370 * @buf - the string that holds the function filter text.
1371 * @len - the length of the string.
1372 * @reset - non zero to reset all filters before applying this filter.
1374 * Filters denote which functions should be enabled when tracing is enabled.
1375 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1377 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1379 ftrace_set_regex(buf, len, reset, 1);
1383 * ftrace_set_notrace - set a function to not trace in ftrace
1384 * @buf - the string that holds the function notrace text.
1385 * @len - the length of the string.
1386 * @reset - non zero to reset all filters before applying this filter.
1388 * Notrace Filters denote which functions should not be enabled when tracing
1389 * is enabled. If @buf is NULL and reset is set, all functions will be enabled for tracing.
1392 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1394 ftrace_set_regex(buf, len, reset, 0);
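/*
 * A minimal in-kernel usage sketch of the two helpers above (the
 * patterns are examples only):
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 *	ftrace_set_notrace("*timer*", strlen("*timer*"), 1);
 *
 * The first call resets any existing filter and limits tracing to
 * functions starting with "sched"; the second resets the notrace list
 * and excludes anything containing "timer".
 */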
1398 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1400 struct seq_file *m = (struct seq_file *)file->private_data;
1401 struct ftrace_iterator *iter;
1403 mutex_lock(&ftrace_regex_lock);
1404 if (file->f_mode & FMODE_READ) {
1407 seq_release(inode, file);
1409 iter = file->private_data;
1411 if (iter->buffer_idx) {
1413 iter->buffer[iter->buffer_idx] = 0;
1414 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1417 mutex_lock(&ftrace_sysctl_lock);
1418 mutex_lock(&ftraced_lock);
1419 if (iter->filtered && ftraced_suspend && ftrace_enabled)
1420 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1421 mutex_unlock(&ftraced_lock);
1422 mutex_unlock(&ftrace_sysctl_lock);
1425 mutex_unlock(&ftrace_regex_lock);
1430 ftrace_filter_release(struct inode *inode, struct file *file)
1432 return ftrace_regex_release(inode, file, 1);
1436 ftrace_notrace_release(struct inode *inode, struct file *file)
1438 return ftrace_regex_release(inode, file, 0);
1442 ftraced_read(struct file *filp, char __user *ubuf,
1443 size_t cnt, loff_t *ppos)
1445 /* don't worry about races */
1446 char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1447 int r = strlen(buf);
1449 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1453 ftraced_write(struct file *filp, const char __user *ubuf,
1454 size_t cnt, loff_t *ppos)
1460 if (cnt >= sizeof(buf))
1463 if (copy_from_user(&buf, ubuf, cnt))
1466 if (strncmp(buf, "enable", 6) == 0)
1468 else if (strncmp(buf, "disable", 7) == 0)
1473 ret = strict_strtoul(buf, 10, &val);
1481 ftrace_enable_daemon();
1483 ftrace_disable_daemon();
1490 static struct file_operations ftrace_avail_fops = {
1491 .open = ftrace_avail_open,
1493 .llseek = seq_lseek,
1494 .release = ftrace_avail_release,
1497 static struct file_operations ftrace_failures_fops = {
1498 .open = ftrace_failures_open,
1500 .llseek = seq_lseek,
1501 .release = ftrace_avail_release,
1504 static struct file_operations ftrace_filter_fops = {
1505 .open = ftrace_filter_open,
1506 .read = ftrace_regex_read,
1507 .write = ftrace_filter_write,
1508 .llseek = ftrace_regex_lseek,
1509 .release = ftrace_filter_release,
1512 static struct file_operations ftrace_notrace_fops = {
1513 .open = ftrace_notrace_open,
1514 .read = ftrace_regex_read,
1515 .write = ftrace_notrace_write,
1516 .llseek = ftrace_regex_lseek,
1517 .release = ftrace_notrace_release,
1520 static struct file_operations ftraced_fops = {
1521 .open = tracing_open_generic,
1522 .read = ftraced_read,
1523 .write = ftraced_write,
1527 * ftrace_force_update - force an update to all recording ftrace functions
1529 int ftrace_force_update(void)
1533 if (unlikely(ftrace_disabled))
1536 mutex_lock(&ftrace_sysctl_lock);
1537 mutex_lock(&ftraced_lock);
1540 * If ftraced_trigger is not set, then there is nothing to do.
1543 if (ftraced_trigger && !ftrace_update_code())
1546 mutex_unlock(&ftraced_lock);
1547 mutex_unlock(&ftrace_sysctl_lock);
1552 static void ftrace_force_shutdown(void)
1554 struct task_struct *task;
1555 int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1557 mutex_lock(&ftraced_lock);
1558 task = ftraced_task;
1559 ftraced_task = NULL;
1560 ftraced_suspend = -1;
1561 ftrace_run_update_code(command);
1562 mutex_unlock(&ftraced_lock);
1568 static __init int ftrace_init_debugfs(void)
1570 struct dentry *d_tracer;
1571 struct dentry *entry;
1573 d_tracer = tracing_init_dentry();
1575 entry = debugfs_create_file("available_filter_functions", 0444,
1576 d_tracer, NULL, &ftrace_avail_fops);
1578 pr_warning("Could not create debugfs "
1579 "'available_filter_functions' entry\n");
1581 entry = debugfs_create_file("failures", 0444,
1582 d_tracer, NULL, &ftrace_failures_fops);
1584 pr_warning("Could not create debugfs 'failures' entry\n");
1586 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1587 NULL, &ftrace_filter_fops);
1589 pr_warning("Could not create debugfs "
1590 "'set_ftrace_filter' entry\n");
1592 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1593 NULL, &ftrace_notrace_fops);
1595 pr_warning("Could not create debugfs "
1596 "'set_ftrace_notrace' entry\n");
1598 entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1599 NULL, &ftraced_fops);
1601 pr_warning("Could not create debugfs "
1602 "'ftraced_enabled' entry\n");
1606 fs_initcall(ftrace_init_debugfs);
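/*
 * The files created above live in the tracing directory of debugfs.
 * Illustrative usage from user space (the path assumes debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*'  > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '*timer*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *	echo disable   > /sys/kernel/debug/tracing/ftraced_enabled
 */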
1608 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
1609 static int ftrace_convert_nops(unsigned long *start,
1614 unsigned long flags;
1618 addr = ftrace_call_adjust(*p++);
1619 /* should not be called from interrupt context */
1620 spin_lock(&ftrace_lock);
1621 ftrace_record_ip(addr);
1622 spin_unlock(&ftrace_lock);
1623 ftrace_shutdown_replenish();
1627 local_irq_save(flags);
1628 __ftrace_update_code(p);
1629 local_irq_restore(flags);
1634 void ftrace_init_module(unsigned long *start, unsigned long *end)
1636 if (ftrace_disabled || start == end)
1638 ftrace_convert_nops(start, end);
1641 extern unsigned long __start_mcount_loc[];
1642 extern unsigned long __stop_mcount_loc[];
1644 void __init ftrace_init(void)
1646 unsigned long count, addr, flags;
1649 /* Keep the ftrace pointer to the stub */
1650 addr = (unsigned long)ftrace_stub;
1652 local_irq_save(flags);
1653 ftrace_dyn_arch_init(&addr);
1654 local_irq_restore(flags);
1656 /* ftrace_dyn_arch_init places the return code in addr */
1660 count = __stop_mcount_loc - __start_mcount_loc;
1662 ret = ftrace_dyn_table_alloc(count);
1666 last_ftrace_enabled = ftrace_enabled = 1;
1668 ret = ftrace_convert_nops(__start_mcount_loc,
1673 ftrace_disabled = 1;
1675 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
1677 static void ftrace_release_hash(unsigned long start, unsigned long end)
1679 struct dyn_ftrace *rec;
1680 struct hlist_node *t, *n;
1681 struct hlist_head *head, temp_list;
1682 unsigned long flags;
1685 preempt_disable_notrace();
1687 /* disable in case we call something that calls mcount */
1688 cpu = raw_smp_processor_id();
1689 per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
1691 ftrace_hash_lock(flags);
1693 for (i = 0; i < FTRACE_HASHSIZE; i++) {
1694 INIT_HLIST_HEAD(&temp_list);
1695 head = &ftrace_hash[i];
1697 /* the hash lock is held and recording is disabled on this CPU */
1698 hlist_for_each_entry_safe(rec, t, n, head, node) {
1699 if (rec->flags & FTRACE_FL_FREE)
1702 if ((rec->ip >= start) && (rec->ip < end))
1703 ftrace_free_rec(rec);
1707 ftrace_hash_unlock(flags);
1709 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
1710 preempt_enable_notrace();
1714 static int ftraced(void *ignore)
1716 unsigned long usecs;
1718 while (!kthread_should_stop()) {
1720 set_current_state(TASK_INTERRUPTIBLE);
1722 /* check once a second */
1723 schedule_timeout(HZ);
1725 if (unlikely(ftrace_disabled))
1728 mutex_lock(&ftrace_sysctl_lock);
1729 mutex_lock(&ftraced_lock);
1730 if (!ftraced_suspend && !ftraced_stop &&
1731 ftrace_update_code()) {
1732 usecs = nsecs_to_usecs(ftrace_update_time);
1733 if (ftrace_update_tot_cnt > 100000) {
1734 ftrace_update_tot_cnt = 0;
1735 pr_info("hm, dftrace overflow: %lu change%s"
1736 " (%lu total) in %lu usec%s\n",
1738 ftrace_update_cnt != 1 ? "s" : "",
1739 ftrace_update_tot_cnt,
1740 usecs, usecs != 1 ? "s" : "");
1741 ftrace_disabled = 1;
1745 mutex_unlock(&ftraced_lock);
1746 mutex_unlock(&ftrace_sysctl_lock);
1748 ftrace_shutdown_replenish();
1750 __set_current_state(TASK_RUNNING);
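/*
 * Summary of the daemon above: ftraced wakes up roughly once per second
 * and, unless it has been suspended or stopped, calls ftrace_update_code()
 * to convert the mcount call sites recorded since the last pass into nops
 * under stop_machine(), then replenishes the record pages.
 */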
1754 static int __init ftrace_dynamic_init(void)
1756 struct task_struct *p;
1760 addr = (unsigned long)ftrace_record_ip;
1762 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1764 /* ftrace_dyn_arch_init places the return code in addr */
1770 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1774 p = kthread_run(ftraced, NULL, "ftraced");
1780 last_ftrace_enabled = ftrace_enabled = 1;
1786 ftrace_disabled = 1;
1790 core_initcall(ftrace_dynamic_init);
1791 #endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1794 # define ftrace_startup() do { } while (0)
1795 # define ftrace_shutdown() do { } while (0)
1796 # define ftrace_startup_sysctl() do { } while (0)
1797 # define ftrace_shutdown_sysctl() do { } while (0)
1798 # define ftrace_force_shutdown() do { } while (0)
1799 #endif /* CONFIG_DYNAMIC_FTRACE */
1802 * ftrace_kill_atomic - kill ftrace from critical sections
1804 * This function should be used by panic code. It stops ftrace
1805 * but in a not so nice way. If you need to simply kill ftrace
1806 * from a non-atomic section, use ftrace_kill.
1808 void ftrace_kill_atomic(void)
1810 ftrace_disabled = 1;
1812 #ifdef CONFIG_DYNAMIC_FTRACE
1813 ftraced_suspend = -1;
1815 clear_ftrace_function();
1819 * ftrace_kill - totally shutdown ftrace
1821 * This is a safety measure. If something was detected that seems
1822 * wrong, calling this function will keep ftrace from doing
1823 * any more modifications or updates. It is
1824 * used when something has gone wrong.
1826 void ftrace_kill(void)
1828 mutex_lock(&ftrace_sysctl_lock);
1829 ftrace_disabled = 1;
1832 clear_ftrace_function();
1833 mutex_unlock(&ftrace_sysctl_lock);
1835 /* Try to totally disable ftrace */
1836 ftrace_force_shutdown();
1840 * register_ftrace_function - register a function for profiling
1841 * @ops - ops structure that holds the function for profiling.
1843 * Register a function to be called by all functions in the kernel.
1846 * Note: @ops->func and all the functions it calls must be labeled
1847 * with "notrace", otherwise it will go into a recursive loop.
1850 int register_ftrace_function(struct ftrace_ops *ops)
1854 if (unlikely(ftrace_disabled))
1857 mutex_lock(&ftrace_sysctl_lock);
1858 ret = __register_ftrace_function(ops);
1860 mutex_unlock(&ftrace_sysctl_lock);
1866 * unregister_ftrace_function - unregister a function for profiling.
1867 * @ops - ops structure that holds the function to unregister
1869 * Unregister a function that was added to be called by ftrace profiling.
1871 int unregister_ftrace_function(struct ftrace_ops *ops)
1875 mutex_lock(&ftrace_sysctl_lock);
1876 ret = __unregister_ftrace_function(ops);
1878 mutex_unlock(&ftrace_sysctl_lock);
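/*
 * A minimal usage sketch for the registration API above (the callback and
 * ops names are hypothetical; note the mandatory "notrace" annotation):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		... something cheap and notrace-safe ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */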
1884 ftrace_enable_sysctl(struct ctl_table *table, int write,
1885 struct file *file, void __user *buffer, size_t *lenp,
1890 if (unlikely(ftrace_disabled))
1893 mutex_lock(&ftrace_sysctl_lock);
1895 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1897 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1900 last_ftrace_enabled = ftrace_enabled;
1902 if (ftrace_enabled) {
1904 ftrace_startup_sysctl();
1906 /* we are starting ftrace again */
1907 if (ftrace_list != &ftrace_list_end) {
1908 if (ftrace_list->next == &ftrace_list_end)
1909 ftrace_trace_function = ftrace_list->func;
1911 ftrace_trace_function = ftrace_list_func;
1915 /* stopping ftrace calls (just send to ftrace_stub) */
1916 ftrace_trace_function = ftrace_stub;
1918 ftrace_shutdown_sysctl();
1922 mutex_unlock(&ftrace_sysctl_lock);