2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
31 #include <asm/ftrace.h>
35 /* ftrace_enabled is a method to turn ftrace on or off */
36 int ftrace_enabled __read_mostly;
/* previous sysctl value, used to detect on<->off transitions */
37 static int last_ftrace_enabled;
40 * Since MCOUNT_ADDR may point to mcount itself, we do not want
41 * to get it confused by reading a reference in the code as we
42 * are parsing on objcopy output of text. Use a variable for
45 static unsigned long mcount_addr = MCOUNT_ADDR;
48 * ftrace_disabled is set when an anomaly is discovered.
49 * ftrace_disabled is much stronger than ftrace_enabled.
51 static int ftrace_disabled __read_mostly;
/* protects ftrace_list and the dyn_ftrace record pages */
53 static DEFINE_SPINLOCK(ftrace_lock);
/* serializes the sysctl handler with register/unregister/update paths */
54 static DEFINE_MUTEX(ftrace_sysctl_lock);
/* Tail sentinel of the callback list (initializer elided in this view). */
56 static struct ftrace_ops ftrace_list_end __read_mostly =
61 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* The live trace callback; ftrace_stub when tracing is off. */
62 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/*
 * Walk every registered ftrace_ops and invoke its handler for this
 * call site.  Installed as ftrace_trace_function when more than one
 * ops is registered (see __register_ftrace_function()).
 */
64 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
66 struct ftrace_ops *op = ftrace_list;
68 /* in case someone actually ports this to alpha! */
69 read_barrier_depends();
71 while (op != &ftrace_list_end) {
/* pairs with the publication ordering in __register_ftrace_function() */
73 read_barrier_depends();
74 op->func(ip, parent_ip);
80 * clear_ftrace_function - reset the ftrace function
82 * This NULLs the ftrace function and in essence stops
83 * tracing. There may be lag
85 void clear_ftrace_function(void)
/* point all future mcount calls at the no-op stub */
87 ftrace_trace_function = ftrace_stub;
/*
 * Link @ops at the head of ftrace_list and, if tracing is enabled,
 * switch the live callback: direct call for a single ops, chain
 * walker (ftrace_list_func) for more than one.
 */
90 static int __register_ftrace_function(struct ftrace_ops *ops)
92 /* should not be called from interrupt context */
93 spin_lock(&ftrace_lock);
95 ops->next = ftrace_list;
97 * We are entering ops into the ftrace_list but another
98 * CPU might be walking that list. We need to make sure
99 * the ops->next pointer is valid before another CPU sees
100 * the ops pointer included into the ftrace_list.
105 if (ftrace_enabled) {
107 * For one func, simply call it directly.
108 * For more than one func, call the chain.
110 if (ops->next == &ftrace_list_end)
111 ftrace_trace_function = ops->func;
113 ftrace_trace_function = ftrace_list_func;
116 spin_unlock(&ftrace_lock);
/*
 * Remove @ops from ftrace_list and downgrade the live callback:
 * back to ftrace_stub when the list empties, or to a direct call
 * when exactly one ops remains.
 */
121 static int __unregister_ftrace_function(struct ftrace_ops *ops)
123 struct ftrace_ops **p;
126 /* should not be called from interrupt context */
127 spin_lock(&ftrace_lock);
130 * If we are removing the last function, then simply point
131 * to the ftrace_stub.
133 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
134 ftrace_trace_function = ftrace_stub;
135 ftrace_list = &ftrace_list_end;
/* otherwise find the list slot pointing at ops (unlink is elided here) */
139 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
150 if (ftrace_enabled) {
151 /* If we only have one func left, then call that directly */
152 if (ftrace_list == &ftrace_list_end ||
153 ftrace_list->next == &ftrace_list_end)
154 ftrace_trace_function = ftrace_list->func;
158 spin_unlock(&ftrace_lock);
163 #ifdef CONFIG_DYNAMIC_FTRACE
165 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
167 * The hash lock is only needed when the recording of the mcount
168 * callers are dynamic. That is, by the caller themselves and
169 * not recorded via the compilation.
/* protects ftrace_hash when call sites are recorded at runtime */
171 static DEFINE_SPINLOCK(ftrace_hash_lock);
172 #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
/*
 * BUGFIX: this macro previously expanded to spin_lock_irqsave() —
 * "unlocking" would re-acquire the spinlock (self-deadlock) and never
 * restore the saved IRQ flags.  It must release and restore instead.
 */
173 #define ftrace_hash_unlock(flags) spin_unlock_irqrestore(&ftrace_hash_lock, flags)
175 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
/* no-op variants: (void)(flags) silences the unused-variable warning */
176 #define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
177 #define ftrace_hash_unlock(flags) do { } while(0)
/* kthread that periodically converts newly-recorded call sites */
180 static struct task_struct *ftraced_task;
/* command bits passed to __ftrace_modify_code() under stop_machine */
183 FTRACE_ENABLE_CALLS = (1 << 0),
184 FTRACE_DISABLE_CALLS = (1 << 1),
185 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
186 FTRACE_ENABLE_MCOUNT = (1 << 3),
187 FTRACE_DISABLE_MCOUNT = (1 << 4),
/* nonzero when at least one filter pattern has been set */
190 static int ftrace_filtered;
191 static int tracing_on;
/* count of records frozen because a kprobe sits on their mcount site */
192 static int frozen_record_count;
194 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
/* per-CPU recursion guard for ftrace_record_ip() */
196 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
198 static DEFINE_MUTEX(ftraced_lock);
199 static DEFINE_MUTEX(ftrace_regex_lock);
/* pages of dyn_ftrace records; chained, filled sequentially via index */
202 struct ftrace_page *next;
204 struct dyn_ftrace records[];
207 #define ENTRIES_PER_PAGE \
208 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
210 /* estimate from running different kernels */
211 #define NR_TO_INIT 10000
213 static struct ftrace_page *ftrace_pages_start;
214 static struct ftrace_page *ftrace_pages;
216 static int ftraced_trigger;
217 static int ftraced_suspend;
218 static int ftraced_stop;
220 static int ftrace_record_suspend;
/* singly-linked free list threaded through dyn_ftrace.ip */
222 static struct dyn_ftrace *ftrace_free_records;
225 #ifdef CONFIG_KPROBES
/* Mark @rec frozen (kprobe present); keeps a global count for fast skip. */
226 static inline void freeze_record(struct dyn_ftrace *rec)
228 if (!(rec->flags & FTRACE_FL_FROZEN)) {
229 rec->flags |= FTRACE_FL_FROZEN;
230 frozen_record_count++;
/* Undo freeze_record(); balanced via the FROZEN flag test. */
234 static inline void unfreeze_record(struct dyn_ftrace *rec)
236 if (rec->flags & FTRACE_FL_FROZEN) {
237 rec->flags &= ~FTRACE_FL_FROZEN;
238 frozen_record_count--;
242 static inline int record_frozen(struct dyn_ftrace *rec)
244 return rec->flags & FTRACE_FL_FROZEN;
/* !CONFIG_KPROBES: nothing is ever frozen */
247 # define freeze_record(rec) ({ 0; })
248 # define unfreeze_record(rec) ({ 0; })
249 # define record_frozen(rec) ({ 0; })
250 #endif /* CONFIG_KPROBES */
/*
 * Decide whether the mcount site at @ip should be skipped because its
 * record is frozen (kprobe present).  Fast-path exits when nothing is
 * frozen; otherwise looks the ip up in ftrace_hash and inspects flags.
 */
252 int skip_trace(unsigned long ip)
255 struct dyn_ftrace *rec;
256 struct hlist_node *t;
257 struct hlist_head *head;
259 if (frozen_record_count == 0)
262 head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
263 hlist_for_each_entry_rcu(rec, t, head, node) {
265 if (record_frozen(rec)) {
266 if (rec->flags & FTRACE_FL_FAILED)
269 if (!(rec->flags & FTRACE_FL_CONVERTED))
272 if (!tracing_on || !ftrace_enabled)
275 if (ftrace_filtered) {
/* with filtering on, skip unless FILTER is set and NOTRACE is not */
276 fl = rec->flags & (FTRACE_FL_FILTER |
278 if (!fl || (fl & FTRACE_FL_NOTRACE))
/* Test whether @ip already has a record in hash bucket @key. */
290 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
292 struct dyn_ftrace *p;
293 struct hlist_node *t;
296 hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
/* Publish @node into bucket @key (RCU-safe for concurrent readers). */
307 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
309 hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
312 /* called from kstop_machine */
313 static inline void ftrace_del_hash(struct dyn_ftrace *node)
315 hlist_del(&node->node);
/*
 * Return @rec to the free list.  Its ip field is reused as the
 * next-free link and FTRACE_FL_FREE marks it as reclaimable.
 */
318 static void ftrace_free_rec(struct dyn_ftrace *rec)
320 rec->ip = (unsigned long)ftrace_free_records;
321 ftrace_free_records = rec;
322 rec->flags |= FTRACE_FL_FREE;
/*
 * Free every dyn_ftrace record whose ip lies in [start, start+size) —
 * e.g. when the module owning that text range is unloaded.
 */
325 void ftrace_release(void *start, unsigned long size)
327 struct dyn_ftrace *rec;
328 struct ftrace_page *pg;
329 unsigned long s = (unsigned long)start;
330 unsigned long e = s + size;
333 if (ftrace_disabled || !start)
336 /* should not be called from interrupt context */
337 spin_lock(&ftrace_lock);
339 for (pg = ftrace_pages_start; pg; pg = pg->next) {
340 for (i = 0; i < pg->index; i++) {
341 rec = &pg->records[i];
343 if ((rec->ip >= s) && (rec->ip < e))
344 ftrace_free_rec(rec);
347 spin_unlock(&ftrace_lock);
/*
 * Allocate a dyn_ftrace record for @ip: reuse the free list when
 * possible, otherwise take the next slot in the current page.
 */
351 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
353 struct dyn_ftrace *rec;
355 /* First check for freed records */
356 if (ftrace_free_records) {
357 rec = ftrace_free_records;
/* corrupted free list: a listed record lacks FTRACE_FL_FREE */
359 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
361 ftrace_free_records = NULL;
/* pop: rec->ip doubles as the next-free link (see ftrace_free_rec) */
367 ftrace_free_records = (void *)rec->ip;
368 memset(rec, 0, sizeof(*rec));
/* current page full: advance to the next pre-allocated page */
372 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
373 if (!ftrace_pages->next)
375 ftrace_pages = ftrace_pages->next;
378 return &ftrace_pages->records[ftrace_pages->index++];
/*
 * Record a newly seen mcount call site @ip into the hash so the
 * daemon can later convert it to a nop.  Called from the trace path,
 * so it guards against recursion with a per-CPU counter and avoids
 * any function that could recurse into the tracer.
 */
382 ftrace_record_ip(unsigned long ip)
384 struct dyn_ftrace *node;
390 if (!ftrace_enabled || ftrace_disabled)
/* remember resched state so we can re-enable preemption to match */
393 resched = need_resched();
394 preempt_disable_notrace();
397 * We simply need to protect against recursion.
398 * Use the the raw version of smp_processor_id and not
399 * __get_cpu_var which can call debug hooks that can
400 * cause a recursive crash here.
402 cpu = raw_smp_processor_id();
403 per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
404 if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
407 if (unlikely(ftrace_record_suspend))
410 key = hash_long(ip, FTRACE_HASHBITS);
412 WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
414 if (ftrace_ip_in_hash(ip, key))
417 ftrace_hash_lock(flags);
419 /* This ip may have hit the hash before the lock */
420 if (ftrace_ip_in_hash(ip, key))
423 node = ftrace_alloc_dyn_node(ip);
429 ftrace_add_hash(node, key);
434 ftrace_hash_unlock(flags);
436 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
438 /* prevent recursion with scheduler */
440 preempt_enable_no_resched_notrace();
442 preempt_enable_notrace();
/* address patched into call sites when a record is enabled */
445 #define FTRACE_ADDR ((long)(ftrace_caller))
/*
 * Compute the correct old/new instruction pair for @rec according to
 * the filter/notrace/enabled flags and patch the site via
 * ftrace_modify_code().  Returns its result (0 on success).
 */
448 __ftrace_replace_code(struct dyn_ftrace *rec,
449 unsigned char *old, unsigned char *new, int enable)
451 unsigned long ip, fl;
455 if (ftrace_filtered && enable) {
457 * If filtering is on:
459 * If this record is set to be filtered and
460 * is enabled then do nothing.
462 * If this record is set to be filtered and
463 * it is not enabled, enable it.
465 * If this record is not set to be filtered
466 * and it is not enabled do nothing.
468 * If this record is set not to trace then
471 * If this record is set not to trace and
472 * it is enabled then disable it.
474 * If this record is not set to be filtered and
475 * it is enabled, disable it.
478 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
481 if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
482 (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
483 !fl || (fl == FTRACE_FL_NOTRACE))
487 * If it is enabled disable it,
488 * otherwise enable it!
490 if (fl & FTRACE_FL_ENABLED) {
491 /* swap new and old */
493 old = ftrace_call_replace(ip, FTRACE_ADDR);
494 rec->flags &= ~FTRACE_FL_ENABLED;
496 new = ftrace_call_replace(ip, FTRACE_ADDR);
497 rec->flags |= FTRACE_FL_ENABLED;
503 * If this record is set not to trace and is
504 * not enabled, do nothing.
506 fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
507 if (fl == FTRACE_FL_NOTRACE)
510 new = ftrace_call_replace(ip, FTRACE_ADDR);
512 old = ftrace_call_replace(ip, FTRACE_ADDR);
/* skip redundant transitions (already in the requested state) */
515 if (rec->flags & FTRACE_FL_ENABLED)
517 rec->flags |= FTRACE_FL_ENABLED;
519 if (!(rec->flags & FTRACE_FL_ENABLED))
521 rec->flags &= ~FTRACE_FL_ENABLED;
525 return ftrace_modify_code(ip, old, new);
/*
 * Walk every recorded call site and enable/disable tracing per the
 * @enable flag.  Runs under stop_machine, so no locking is needed.
 * Sites that fail to patch are flagged FAILED; boot-time or non-core
 * failures have their records reclaimed.
 */
528 static void ftrace_replace_code(int enable)
531 unsigned char *new = NULL, *old = NULL;
532 struct dyn_ftrace *rec;
533 struct ftrace_page *pg;
/* one side of the old/new pair is always the nop sequence */
536 old = ftrace_nop_replace();
538 new = ftrace_nop_replace();
540 for (pg = ftrace_pages_start; pg; pg = pg->next) {
541 for (i = 0; i < pg->index; i++) {
542 rec = &pg->records[i];
544 /* don't modify code that has already faulted */
545 if (rec->flags & FTRACE_FL_FAILED)
548 /* ignore updates to this record's mcount site */
549 if (get_kprobe((void *)rec->ip)) {
553 unfreeze_record(rec);
556 failed = __ftrace_replace_code(rec, old, new, enable);
557 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
558 rec->flags |= FTRACE_FL_FAILED;
559 if ((system_state == SYSTEM_BOOTING) ||
560 !core_kernel_text(rec->ip)) {
561 ftrace_del_hash(rec);
562 ftrace_free_rec(rec);
/* Ensure a spare record page exists; allocate one if the chain ends. */
569 static void ftrace_shutdown_replenish(void)
571 if (ftrace_pages->next)
574 /* allocate another page */
575 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
/*
 * Convert @rec's mcount call into a nop.  On patch failure the
 * record is marked FTRACE_FL_FAILED so it is never touched again.
 */
579 ftrace_code_disable(struct dyn_ftrace *rec)
582 unsigned char *nop, *call;
587 nop = ftrace_nop_replace();
588 call = ftrace_call_replace(ip, mcount_addr);
590 failed = ftrace_modify_code(ip, call, nop);
592 rec->flags |= FTRACE_FL_FAILED;
/* forward declaration: used by __ftrace_modify_code() below */
598 static int __ftrace_update_code(void *ignore);
/*
 * stop_machine() callback: apply the FTRACE_* command bits while all
 * CPUs are halted, so code patching is race-free.
 */
600 static int __ftrace_modify_code(void *data)
605 if (*command & FTRACE_ENABLE_CALLS) {
607 * Update any recorded ips now that we have the
610 __ftrace_update_code(NULL);
611 ftrace_replace_code(1);
613 } else if (*command & FTRACE_DISABLE_CALLS) {
614 ftrace_replace_code(0);
618 if (*command & FTRACE_UPDATE_TRACE_FUNC)
619 ftrace_update_ftrace_func(ftrace_trace_function);
/* point the mcount hook at the recorder or at the stub */
621 if (*command & FTRACE_ENABLE_MCOUNT) {
622 addr = (unsigned long)ftrace_record_ip;
623 ftrace_mcount_set(&addr);
624 } else if (*command & FTRACE_DISABLE_MCOUNT) {
625 addr = (unsigned long)ftrace_stub;
626 ftrace_mcount_set(&addr);
/* Run __ftrace_modify_code(&command) with every CPU stopped. */
632 static void ftrace_run_update_code(int command)
634 stop_machine(__ftrace_modify_code, &command, NULL);
/* Stop the ftraced daemon from running updates, then force one last one. */
637 void ftrace_disable_daemon(void)
639 /* Stop the daemon from calling kstop_machine */
640 mutex_lock(&ftraced_lock);
642 mutex_unlock(&ftraced_lock);
644 ftrace_force_update();
/* Re-allow daemon updates and trigger an immediate one. */
647 void ftrace_enable_daemon(void)
649 mutex_lock(&ftraced_lock);
651 mutex_unlock(&ftraced_lock);
653 ftrace_force_update();
/* last trace callback installed, to detect when it changes */
656 static ftrace_func_t saved_ftrace_func;
/*
 * Bump the tracer user count and, on the first user (or when the
 * callback changed), patch the kernel under stop_machine.
 */
658 static void ftrace_startup(void)
662 if (unlikely(ftrace_disabled))
665 mutex_lock(&ftraced_lock);
667 if (ftraced_suspend == 1)
668 command |= FTRACE_ENABLE_CALLS;
670 if (saved_ftrace_func != ftrace_trace_function) {
671 saved_ftrace_func = ftrace_trace_function;
672 command |= FTRACE_UPDATE_TRACE_FUNC;
675 if (!command || !ftrace_enabled)
678 ftrace_run_update_code(command);
680 mutex_unlock(&ftraced_lock);
/* Counterpart of ftrace_startup(): disable calls when last user leaves. */
683 static void ftrace_shutdown(void)
687 if (unlikely(ftrace_disabled))
690 mutex_lock(&ftraced_lock);
692 if (!ftraced_suspend)
693 command |= FTRACE_DISABLE_CALLS;
695 if (saved_ftrace_func != ftrace_trace_function) {
696 saved_ftrace_func = ftrace_trace_function;
697 command |= FTRACE_UPDATE_TRACE_FUNC;
700 if (!command || !ftrace_enabled)
703 ftrace_run_update_code(command);
705 mutex_unlock(&ftraced_lock);
/* sysctl turned ftrace on: re-enable mcount and any active call sites. */
708 static void ftrace_startup_sysctl(void)
710 int command = FTRACE_ENABLE_MCOUNT;
712 if (unlikely(ftrace_disabled))
715 mutex_lock(&ftraced_lock);
716 /* Force update next time */
717 saved_ftrace_func = NULL;
718 /* ftraced_suspend is true if we want ftrace running */
720 command |= FTRACE_ENABLE_CALLS;
722 ftrace_run_update_code(command);
723 mutex_unlock(&ftraced_lock);
/* sysctl turned ftrace off: disable mcount and any active call sites. */
726 static void ftrace_shutdown_sysctl(void)
728 int command = FTRACE_DISABLE_MCOUNT;
730 if (unlikely(ftrace_disabled))
733 mutex_lock(&ftraced_lock);
734 /* ftraced_suspend is true if ftrace is running */
736 command |= FTRACE_DISABLE_CALLS;
738 ftrace_run_update_code(command);
739 mutex_unlock(&ftraced_lock);
/* statistics for the periodic conversion pass */
742 static cycle_t ftrace_update_time;
743 static unsigned long ftrace_update_cnt;
744 unsigned long ftrace_update_tot_cnt;
/*
 * Convert all newly-recorded call sites to nops.  Runs with the
 * machine stopped (or IRQs off at boot), so the hash can be walked
 * without locks.  Also gathers timing/count statistics.
 */
746 static int __ftrace_update_code(void *ignore)
748 int i, save_ftrace_enabled;
750 struct dyn_ftrace *p;
751 struct hlist_node *t, *n;
752 struct hlist_head *head, temp_list;
754 /* Don't be recording funcs now */
755 ftrace_record_suspend++;
756 save_ftrace_enabled = ftrace_enabled;
759 start = ftrace_now(raw_smp_processor_id());
760 ftrace_update_cnt = 0;
762 /* No locks needed, the machine is stopped! */
763 for (i = 0; i < FTRACE_HASHSIZE; i++) {
764 INIT_HLIST_HEAD(&temp_list);
765 head = &ftrace_hash[i];
767 /* all CPUS are stopped, we are safe to modify code */
768 hlist_for_each_entry_safe(p, t, n, head, node) {
769 /* Skip over failed records which have not been
771 if (p->flags & FTRACE_FL_FAILED)
774 /* Unconverted records are always at the head of the
775 * hash bucket. Once we encounter a converted record,
776 * simply skip over to the next bucket. Saves ftraced
777 * some processor cycles (ftrace does its bid for
778 * global warming :-p ). */
779 if (p->flags & (FTRACE_FL_CONVERTED))
782 /* Ignore updates to this record's mcount site.
783 * Reintroduce this record at the head of this
784 * bucket to attempt to "convert" it again if
785 * the kprobe on it is unregistered before the
787 if (get_kprobe((void *)p->ip)) {
789 INIT_HLIST_NODE(&p->node);
790 hlist_add_head(&p->node, &temp_list);
797 /* convert record (i.e, patch mcount-call with NOP) */
798 if (ftrace_code_disable(p)) {
799 p->flags |= FTRACE_FL_CONVERTED;
802 if ((system_state == SYSTEM_BOOTING) ||
803 !core_kernel_text(p->ip)) {
/* re-queue kprobe'd records for a later conversion attempt */
810 hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
812 INIT_HLIST_NODE(&p->node);
813 hlist_add_head(&p->node, head);
817 stop = ftrace_now(raw_smp_processor_id());
818 ftrace_update_time = stop - start;
819 ftrace_update_tot_cnt += ftrace_update_cnt;
822 ftrace_enabled = save_ftrace_enabled;
823 ftrace_record_suspend--;
/* Run the conversion pass under stop_machine if there is work pending. */
828 static int ftrace_update_code(void)
830 if (unlikely(ftrace_disabled) ||
831 !ftrace_enabled || !ftraced_trigger)
834 stop_machine(__ftrace_update_code, NULL, NULL);
/*
 * Boot-time allocation of the dyn_ftrace record pages, sized to hold
 * @num_to_init entries.
 */
839 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
841 struct ftrace_page *pg;
845 /* allocate a few pages */
846 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
847 if (!ftrace_pages_start)
851 * Allocate a few more pages.
853 * TODO: have some parser search vmlinux before
854 * final linking to find all calls to ftrace.
856 * a) know how many pages to allocate.
858 * b) set up the table then.
860 * The dynamic code is still necessary for
864 pg = ftrace_pages = ftrace_pages_start;
866 cnt = num_to_init / ENTRIES_PER_PAGE;
867 pr_info("ftrace: allocating %ld hash entries in %d pages\n",
870 for (i = 0; i < cnt; i++) {
871 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
873 /* If we fail, we'll try later anyway */
/* iterator mode flags for the debugfs seq_file readers */
884 FTRACE_ITER_FILTER = (1 << 0),
885 FTRACE_ITER_CONT = (1 << 1),
886 FTRACE_ITER_NOTRACE = (1 << 2),
887 FTRACE_ITER_FAILURES = (1 << 3),
890 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
/* cursor state shared by the avail/filter/notrace debugfs files */
892 struct ftrace_iterator {
894 struct ftrace_page *pg;
897 unsigned char buffer[FTRACE_BUFF_MAX+1];
/*
 * seq_file .next: advance to the next record that matches the
 * iterator's flags (skipping free/failed/filtered entries).
 */
903 t_next(struct seq_file *m, void *v, loff_t *pos)
905 struct ftrace_iterator *iter = m->private;
906 struct dyn_ftrace *rec = NULL;
910 /* should not be called from interrupt context */
911 spin_lock(&ftrace_lock);
913 if (iter->idx >= iter->pg->index) {
914 if (iter->pg->next) {
915 iter->pg = iter->pg->next;
920 rec = &iter->pg->records[iter->idx++];
921 if ((rec->flags & FTRACE_FL_FREE) ||
923 (!(iter->flags & FTRACE_ITER_FAILURES) &&
924 (rec->flags & FTRACE_FL_FAILED)) ||
926 ((iter->flags & FTRACE_ITER_FAILURES) &&
927 !(rec->flags & FTRACE_FL_FAILED)) ||
929 ((iter->flags & FTRACE_ITER_NOTRACE) &&
930 !(rec->flags & FTRACE_FL_NOTRACE))) {
935 spin_unlock(&ftrace_lock);
/* seq_file .start: re-sync the iterator with *pos by stepping t_next(). */
942 static void *t_start(struct seq_file *m, loff_t *pos)
944 struct ftrace_iterator *iter = m->private;
948 if (*pos != iter->pos) {
949 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
953 p = t_next(m, p, &l);
/* seq_file .stop: nothing to release */
959 static void t_stop(struct seq_file *m, void *p)
/* seq_file .show: print the record's symbol name */
963 static int t_show(struct seq_file *m, void *v)
965 struct dyn_ftrace *rec = v;
966 char str[KSYM_SYMBOL_LEN];
971 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
973 seq_printf(m, "%s\n", str);
978 static struct seq_operations show_ftrace_seq_ops = {
/* Open "available_filter_functions": set up an iterator + seq_file. */
986 ftrace_avail_open(struct inode *inode, struct file *file)
988 struct ftrace_iterator *iter;
991 if (unlikely(ftrace_disabled))
994 iter = kzalloc(sizeof(*iter), GFP_KERNEL)
998 iter->pg = ftrace_pages_start;
1001 ret = seq_open(file, &show_ftrace_seq_ops);
1003 struct seq_file *m = file->private_data;
/* Release: tear down the seq_file and free the iterator. */
1013 int ftrace_avail_release(struct inode *inode, struct file *file)
1015 struct seq_file *m = (struct seq_file *)file->private_data;
1016 struct ftrace_iterator *iter = m->private;
1018 seq_release(inode, file);
/* Open "failures": same as avail but shows only FAILED records. */
1025 ftrace_failures_open(struct inode *inode, struct file *file)
1029 struct ftrace_iterator *iter;
1031 ret = ftrace_avail_open(inode, file);
1033 m = (struct seq_file *)file->private_data;
1034 iter = (struct ftrace_iterator *)m->private;
1035 iter->flags = FTRACE_ITER_FAILURES;
/*
 * Clear the FILTER (enable=1) or NOTRACE (enable=0) flag on every
 * non-failed record, and mark filtering as inactive.
 */
1042 static void ftrace_filter_reset(int enable)
1044 struct ftrace_page *pg;
1045 struct dyn_ftrace *rec;
1046 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1049 /* should not be called from interrupt context */
1050 spin_lock(&ftrace_lock);
1052 ftrace_filtered = 0;
1053 pg = ftrace_pages_start;
1055 for (i = 0; i < pg->index; i++) {
1056 rec = &pg->records[i];
1057 if (rec->flags & FTRACE_FL_FAILED)
1059 rec->flags &= ~type;
1063 spin_unlock(&ftrace_lock);
/*
 * Common open for set_ftrace_filter / set_ftrace_notrace.  Writing
 * without O_APPEND resets the existing patterns first; reading goes
 * through the seq_file iterator.
 */
1067 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1069 struct ftrace_iterator *iter;
1072 if (unlikely(ftrace_disabled))
1075 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1079 mutex_lock(&ftrace_regex_lock);
1080 if ((file->f_mode & FMODE_WRITE) &&
1081 !(file->f_flags & O_APPEND))
1082 ftrace_filter_reset(enable);
1084 if (file->f_mode & FMODE_READ) {
1085 iter->pg = ftrace_pages_start;
1087 iter->flags = enable ? FTRACE_ITER_FILTER :
1088 FTRACE_ITER_NOTRACE;
1090 ret = seq_open(file, &show_ftrace_seq_ops);
1092 struct seq_file *m = file->private_data;
1097 file->private_data = iter;
1098 mutex_unlock(&ftrace_regex_lock);
/* thin wrappers selecting filter (1) vs notrace (0) mode */
1104 ftrace_filter_open(struct inode *inode, struct file *file)
1106 return ftrace_regex_open(inode, file, 1);
1110 ftrace_notrace_open(struct inode *inode, struct file *file)
1112 return ftrace_regex_open(inode, file, 0);
/* Read only makes sense when opened readable; delegate to seq_read. */
1116 ftrace_regex_read(struct file *file, char __user *ubuf,
1117 size_t cnt, loff_t *ppos)
1119 if (file->f_mode & FMODE_READ)
1120 return seq_read(file, ubuf, cnt, ppos);
/* Seek via seq_lseek when readable, else just bump f_pos. */
1126 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1130 if (file->f_mode & FMODE_READ)
1131 ret = seq_lseek(file, offset, origin);
1133 file->f_pos = ret = 1;
/*
 * Apply a glob pattern from @buff to all records, setting FILTER
 * (enable=1) or NOTRACE (enable=0) on matches.  A '*' splits the
 * pattern into front/middle/end match modes.
 */
1146 ftrace_match(unsigned char *buff, int len, int enable)
1148 char str[KSYM_SYMBOL_LEN];
1149 char *search = NULL;
1150 struct ftrace_page *pg;
1151 struct dyn_ftrace *rec;
1152 int type = MATCH_FULL;
1153 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1154 unsigned i, match = 0, search_len = 0;
/* parse the pattern: position of '*' decides the match mode */
1156 for (i = 0; i < len; i++) {
1157 if (buff[i] == '*') {
1159 search = buff + i + 1;
1160 type = MATCH_END_ONLY;
1161 search_len = len - (i + 1);
1163 if (type == MATCH_END_ONLY) {
1164 type = MATCH_MIDDLE_ONLY;
1167 type = MATCH_FRONT_ONLY;
1175 /* should not be called from interrupt context */
1176 spin_lock(&ftrace_lock);
1178 ftrace_filtered = 1;
1179 pg = ftrace_pages_start;
1181 for (i = 0; i < pg->index; i++) {
1185 rec = &pg->records[i];
1186 if (rec->flags & FTRACE_FL_FAILED)
1188 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1191 if (strcmp(str, buff) == 0)
1194 case MATCH_FRONT_ONLY:
1195 if (memcmp(str, buff, match) == 0)
1198 case MATCH_MIDDLE_ONLY:
1199 if (strstr(str, search))
1202 case MATCH_END_ONLY:
1203 ptr = strstr(str, search);
1204 if (ptr && (ptr[search_len] == 0))
1213 spin_unlock(&ftrace_lock);
/*
 * Write handler for the filter/notrace files: tokenize user input on
 * whitespace (FTRACE_ITER_CONT carries a token across writes) and
 * feed each complete token to ftrace_match().
 */
1217 ftrace_regex_write(struct file *file, const char __user *ubuf,
1218 size_t cnt, loff_t *ppos, int enable)
1220 struct ftrace_iterator *iter;
1225 if (!cnt || cnt < 0)
1228 mutex_lock(&ftrace_regex_lock);
1230 if (file->f_mode & FMODE_READ) {
1231 struct seq_file *m = file->private_data;
1234 iter = file->private_data;
1237 iter->flags &= ~FTRACE_ITER_CONT;
1238 iter->buffer_idx = 0;
1241 ret = get_user(ch, ubuf++);
1247 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1248 /* skip white space */
1249 while (cnt && isspace(ch)) {
1250 ret = get_user(ch, ubuf++);
1258 file->f_pos += read;
1263 iter->buffer_idx = 0;
/* accumulate the token into iter->buffer, bounded by FTRACE_BUFF_MAX */
1266 while (cnt && !isspace(ch)) {
1267 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1268 iter->buffer[iter->buffer_idx++] = ch;
1273 ret = get_user(ch, ubuf++);
1282 iter->buffer[iter->buffer_idx] = 0;
1283 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1284 iter->buffer_idx = 0;
1286 iter->flags |= FTRACE_ITER_CONT;
1289 file->f_pos += read;
1293 mutex_unlock(&ftrace_regex_lock);
/* thin wrappers selecting filter (1) vs notrace (0) mode */
1299 ftrace_filter_write(struct file *file, const char __user *ubuf,
1300 size_t cnt, loff_t *ppos)
1302 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1306 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1307 size_t cnt, loff_t *ppos)
1309 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
/* Kernel-internal entry: optionally reset, then apply pattern @buf. */
1313 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1315 if (unlikely(ftrace_disabled))
1318 mutex_lock(&ftrace_regex_lock);
1320 ftrace_filter_reset(enable);
1322 ftrace_match(buf, len, enable);
1323 mutex_unlock(&ftrace_regex_lock);
1327 * ftrace_set_filter - set a function to filter on in ftrace
1328 * @buf - the string that holds the function filter text.
1329 * @len - the length of the string.
1330 * @reset - non zero to reset all filters before applying this filter.
1332 * Filters denote which functions should be enabled when tracing is enabled.
1333 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1335 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1337 ftrace_set_regex(buf, len, reset, 1);
1341 * ftrace_set_notrace - set a function to not trace in ftrace
1342 * @buf - the string that holds the function notrace text.
1343 * @len - the length of the string.
1344 * @reset - non zero to reset all filters before applying this filter.
1346 * Notrace Filters denote which functions should not be enabled when tracing
1347 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1350 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1352 ftrace_set_regex(buf, len, reset, 0);
/*
 * Release for filter/notrace files: flush any partial token, then if
 * filters changed while tracing is live, re-run the code update.
 */
1356 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1358 struct seq_file *m = (struct seq_file *)file->private_data;
1359 struct ftrace_iterator *iter;
1361 mutex_lock(&ftrace_regex_lock);
1362 if (file->f_mode & FMODE_READ) {
1365 seq_release(inode, file);
1367 iter = file->private_data;
1369 if (iter->buffer_idx) {
1371 iter->buffer[iter->buffer_idx] = 0;
1372 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1375 mutex_lock(&ftrace_sysctl_lock);
1376 mutex_lock(&ftraced_lock);
1377 if (iter->filtered && ftraced_suspend && ftrace_enabled)
1378 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1379 mutex_unlock(&ftraced_lock);
1380 mutex_unlock(&ftrace_sysctl_lock);
1383 mutex_unlock(&ftrace_regex_lock);
/* thin wrappers selecting filter (1) vs notrace (0) mode */
1388 ftrace_filter_release(struct inode *inode, struct file *file)
1390 return ftrace_regex_release(inode, file, 1);
1394 ftrace_notrace_release(struct inode *inode, struct file *file)
1396 return ftrace_regex_release(inode, file, 0);
/* Report whether the ftraced daemon is enabled or disabled. */
1400 ftraced_read(struct file *filp, char __user *ubuf,
1401 size_t cnt, loff_t *ppos)
1403 /* don't worry about races */
1404 char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1405 int r = strlen(buf);
1407 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
/* Accept "enable"/"disable" or a 0/1 value to toggle the daemon. */
1411 ftraced_write(struct file *filp, const char __user *ubuf,
1412 size_t cnt, loff_t *ppos)
1418 if (cnt >= sizeof(buf))
1421 if (copy_from_user(&buf, ubuf, cnt))
1424 if (strncmp(buf, "enable", 6) == 0)
1426 else if (strncmp(buf, "disable", 7) == 0)
1431 ret = strict_strtoul(buf, 10, &val);
1439 ftrace_enable_daemon();
1441 ftrace_disable_daemon();
/* debugfs file_operations tables for the five control files */
1448 static struct file_operations ftrace_avail_fops = {
1449 .open = ftrace_avail_open,
1451 .llseek = seq_lseek,
1452 .release = ftrace_avail_release,
1455 static struct file_operations ftrace_failures_fops = {
1456 .open = ftrace_failures_open,
1458 .llseek = seq_lseek,
1459 .release = ftrace_avail_release,
1462 static struct file_operations ftrace_filter_fops = {
1463 .open = ftrace_filter_open,
1464 .read = ftrace_regex_read,
1465 .write = ftrace_filter_write,
1466 .llseek = ftrace_regex_lseek,
1467 .release = ftrace_filter_release,
1470 static struct file_operations ftrace_notrace_fops = {
1471 .open = ftrace_notrace_open,
1472 .read = ftrace_regex_read,
1473 .write = ftrace_notrace_write,
1474 .llseek = ftrace_regex_lseek,
1475 .release = ftrace_notrace_release,
1478 static struct file_operations ftraced_fops = {
1479 .open = tracing_open_generic,
1480 .read = ftraced_read,
1481 .write = ftraced_write,
1485 * ftrace_force_update - force an update to all recording ftrace functions
1487 int ftrace_force_update(void)
1491 if (unlikely(ftrace_disabled))
1494 mutex_lock(&ftrace_sysctl_lock);
1495 mutex_lock(&ftraced_lock);
1498 * If ftraced_trigger is not set, then there is nothing
1501 if (ftraced_trigger && !ftrace_update_code())
1504 mutex_unlock(&ftraced_lock);
1505 mutex_unlock(&ftrace_sysctl_lock);
/*
 * Hard-stop dynamic ftrace: detach the daemon task, force suspend to
 * -1, and disable all patched call sites.
 */
1510 static void ftrace_force_shutdown(void)
1512 struct task_struct *task;
1513 int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1515 mutex_lock(&ftraced_lock);
1516 task = ftraced_task;
1517 ftraced_task = NULL;
1518 ftraced_suspend = -1;
1519 ftrace_run_update_code(command);
1520 mutex_unlock(&ftraced_lock);
/* Create the five ftrace control files under the tracing debugfs dir. */
1526 static __init int ftrace_init_debugfs(void)
1528 struct dentry *d_tracer;
1529 struct dentry *entry;
1531 d_tracer = tracing_init_dentry();
1533 entry = debugfs_create_file("available_filter_functions", 0444,
1534 d_tracer, NULL, &ftrace_avail_fops);
1536 pr_warning("Could not create debugfs "
1537 "'available_filter_functions' entry\n");
1539 entry = debugfs_create_file("failures", 0444,
1540 d_tracer, NULL, &ftrace_failures_fops);
1542 pr_warning("Could not create debugfs 'failures' entry\n");
1544 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1545 NULL, &ftrace_filter_fops);
1547 pr_warning("Could not create debugfs "
1548 "'set_ftrace_filter' entry\n");
1550 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1551 NULL, &ftrace_notrace_fops);
1553 pr_warning("Could not create debugfs "
1554 "'set_ftrace_notrace' entry\n");
1556 entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1557 NULL, &ftraced_fops);
1559 pr_warning("Could not create debugfs "
1560 "'ftraced_enabled' entry\n");
1564 fs_initcall(ftrace_init_debugfs);
1566 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * Convert a compile-time-recorded table of mcount addresses
 * ([start, end)) into hash records, then patch them to nops with
 * IRQs off (boot/module load context, no stop_machine needed).
 */
1567 static int ftrace_convert_nops(unsigned long *start,
1572 unsigned long flags;
1576 addr = ftrace_call_adjust(*p++);
1577 /* should not be called from interrupt context */
1578 spin_lock(&ftrace_lock);
1579 ftrace_record_ip(addr);
1580 spin_unlock(&ftrace_lock);
1581 ftrace_shutdown_replenish();
1585 local_irq_save(flags);
1586 __ftrace_update_code(p);
1587 local_irq_restore(flags);
/* Module load hook: record and convert the module's mcount table. */
1592 void ftrace_init_module(unsigned long *start, unsigned long *end)
1594 if (ftrace_disabled || start == end)
1596 ftrace_convert_nops(start, end);
/* mcount location table emitted by the linker script */
1599 extern unsigned long __start_mcount_loc[];
1600 extern unsigned long __stop_mcount_loc[];
/* Boot-time init: size the record table and convert the mcount table. */
1602 void __init ftrace_init(void)
1604 unsigned long count, addr, flags;
1607 /* Keep the ftrace pointer to the stub */
1608 addr = (unsigned long)ftrace_stub;
1610 local_irq_save(flags);
1611 ftrace_dyn_arch_init(&addr);
1612 local_irq_restore(flags);
1614 /* ftrace_dyn_arch_init places the return code in addr */
1618 count = __stop_mcount_loc - __start_mcount_loc;
1620 ret = ftrace_dyn_table_alloc(count);
1624 last_ftrace_enabled = ftrace_enabled = 1;
1626 ret = ftrace_convert_nops(__start_mcount_loc,
/* failure path: permanently disable ftrace */
1631 ftrace_disabled = 1;
1633 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
/*
 * Daemon thread: once a second, convert newly recorded call sites.
 * Kills ftrace if the total update count runs away (>100000).
 */
1634 static int ftraced(void *ignore)
1636 unsigned long usecs;
1638 while (!kthread_should_stop()) {
1640 set_current_state(TASK_INTERRUPTIBLE);
1642 /* check once a second */
1643 schedule_timeout(HZ);
1645 if (unlikely(ftrace_disabled))
1648 mutex_lock(&ftrace_sysctl_lock);
1649 mutex_lock(&ftraced_lock);
1650 if (!ftraced_suspend && !ftraced_stop &&
1651 ftrace_update_code()) {
1652 usecs = nsecs_to_usecs(ftrace_update_time);
1653 if (ftrace_update_tot_cnt > 100000) {
1654 ftrace_update_tot_cnt = 0;
1655 pr_info("hm, dftrace overflow: %lu change%s"
1656 " (%lu total) in %lu usec%s\n",
1658 ftrace_update_cnt != 1 ? "s" : "",
1659 ftrace_update_tot_cnt,
1660 usecs, usecs != 1 ? "s" : "");
1661 ftrace_disabled = 1;
1665 mutex_unlock(&ftraced_lock);
1666 mutex_unlock(&ftrace_sysctl_lock);
1668 ftrace_shutdown_replenish();
1670 __set_current_state(TASK_RUNNING);
/* Runtime-recording init: arch setup, table alloc, start the daemon. */
1674 static int __init ftrace_dynamic_init(void)
1676 struct task_struct *p;
1680 addr = (unsigned long)ftrace_record_ip;
1682 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1684 /* ftrace_dyn_arch_init places the return code in addr */
1690 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1694 p = kthread_run(ftraced, NULL, "ftraced");
1700 last_ftrace_enabled = ftrace_enabled = 1;
/* failure path: permanently disable ftrace */
1706 ftrace_disabled = 1;
1710 core_initcall(ftrace_dynamic_init);
1711 #endif /* CONFIG_FTRACE_MCOUNT_RECORD */
/* !CONFIG_DYNAMIC_FTRACE: code patching paths compile away to no-ops */
1714 # define ftrace_startup() do { } while (0)
1715 # define ftrace_shutdown() do { } while (0)
1716 # define ftrace_startup_sysctl() do { } while (0)
1717 # define ftrace_shutdown_sysctl() do { } while (0)
1718 # define ftrace_force_shutdown() do { } while (0)
1719 #endif /* CONFIG_DYNAMIC_FTRACE */
1722 * ftrace_kill_atomic - kill ftrace from critical sections
1724 * This function should be used by panic code. It stops ftrace
1725 * but in a not so nice way. If you need to simply kill ftrace
1726 * from a non-atomic section, use ftrace_kill.
1728 void ftrace_kill_atomic(void)
/* no locks taken: must be safe to call from panic/NMI context */
1730 ftrace_disabled = 1;
1732 #ifdef CONFIG_DYNAMIC_FTRACE
1733 ftraced_suspend = -1;
1735 clear_ftrace_function();
1739 * ftrace_kill - totally shutdown ftrace
1741 * This is a safety measure. If something was detected that seems
1742 * wrong, calling this function will keep ftrace from doing
1743 * any more modifications, and updates.
1744 * used when something went wrong.
1746 void ftrace_kill(void)
1748 mutex_lock(&ftrace_sysctl_lock);
1749 ftrace_disabled = 1;
1752 clear_ftrace_function();
1753 mutex_unlock(&ftrace_sysctl_lock);
1755 /* Try to totally disable ftrace */
1756 ftrace_force_shutdown();
1760 * register_ftrace_function - register a function for profiling
1761 * @ops - ops structure that holds the function for profiling.
1763 * Register a function to be called by all functions in the
1766 * Note: @ops->func and all the functions it calls must be labeled
1767 * with "notrace", otherwise it will go into a
1770 int register_ftrace_function(struct ftrace_ops *ops)
1774 if (unlikely(ftrace_disabled))
1777 mutex_lock(&ftrace_sysctl_lock);
1778 ret = __register_ftrace_function(ops);
1780 mutex_unlock(&ftrace_sysctl_lock);
1786 * unregister_ftrace_function - unresgister a function for profiling.
1787 * @ops - ops structure that holds the function to unregister
1789 * Unregister a function that was added to be called by ftrace profiling.
1791 int unregister_ftrace_function(struct ftrace_ops *ops)
1795 mutex_lock(&ftrace_sysctl_lock);
1796 ret = __unregister_ftrace_function(ops);
1798 mutex_unlock(&ftrace_sysctl_lock);
/*
 * sysctl handler for /proc/sys/kernel/ftrace_enabled: on a value
 * change, start or stop tracing and restore the proper callback.
 */
1804 ftrace_enable_sysctl(struct ctl_table *table, int write,
1805 struct file *file, void __user *buffer, size_t *lenp,
1810 if (unlikely(ftrace_disabled))
1813 mutex_lock(&ftrace_sysctl_lock);
1815 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
/* bail on read, error, or no actual change of value */
1817 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1820 last_ftrace_enabled = ftrace_enabled;
1822 if (ftrace_enabled) {
1824 ftrace_startup_sysctl();
1826 /* we are starting ftrace again */
1827 if (ftrace_list != &ftrace_list_end) {
1828 if (ftrace_list->next == &ftrace_list_end)
1829 ftrace_trace_function = ftrace_list->func;
1831 ftrace_trace_function = ftrace_list_func;
1835 /* stopping ftrace calls (just send to ftrace_stub) */
1836 ftrace_trace_function = ftrace_stub;
1838 ftrace_shutdown_sysctl();
1842 mutex_unlock(&ftrace_sysctl_lock);