/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        };
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and
 * tracing actually stopping on all CPUs.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
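
/*
 * The publish sequence above pairs with the read_barrier_depends()
 * calls in ftrace_list_func(). A minimal sketch of the pattern
 * (illustrative only, not part of the original file):
 *
 *	writer				reader
 *	------				------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *					op = op->next;
 *
 * The write barrier guarantees that ops->next is initialized before
 * ops itself becomes visible as the list head, so a concurrent reader
 * never follows a garbage next pointer.
 */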
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};
static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
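
/*
 * Worked example (illustrative; the actual sizes are arch- and
 * config-dependent): with 4096-byte pages, a 16-byte struct
 * ftrace_page header and a 32-byte struct dyn_ftrace, this yields
 * (4096 - 16) / 32 = 127 records per page.
 */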
/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
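
/*
 * Note the free-list trick above: a freed record's ip field is reused
 * to hold the pointer to the next free record, so the free list costs
 * no extra memory. FTRACE_FL_FREE marks the record so a corrupted
 * entry can be detected on allocation.
 */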
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race that the ftraced will update the
         * hash and reset here. If it is already converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))
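
/*
 * Background for the patching below: 'gcc -pg' inserts a call to
 * mcount at the start of every function. Dynamic ftrace rewrites each
 * such call site: to a nop while that function is not being traced,
 * and to a call to ftrace_caller while it is.
 */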
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;
        int failed;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0) || (rec->flags & FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {
                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        failed = ftrace_modify_code(ip, old, new);
        if (failed) {
                unsigned long key;
                /* It is possible that the function hasn't been converted yet */
                key = hash_long(ip, FTRACE_HASHBITS);
                if (!ftrace_ip_in_hash(ip, key)) {
                        rec->flags |= FTRACE_FL_FAILED;
                        ftrace_free_rec(rec);
                }
        }

        return 0;
}
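
/*
 * The filtered-enable logic above, summarized as a table
 * (FILTER and ENABLED are bits in rec->flags):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		nothing (already being traced)
 *	  1	   0		patch in the ftrace_caller call
 *	  0	   0		nothing
 *	  0	   1		patch the call back to a nop
 *
 * FTRACE_FL_NOTRACE overrides every row: such records are never
 * touched.
 */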
static void ftrace_replace_code(int enable)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int i;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        __ftrace_replace_code(rec, old, new, enable);
                }
        }
}
static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                ftrace_free_rec(rec);
                return 0;
        }
        return 1;
}
static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}
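
/*
 * ftrace_run_update_code() runs __ftrace_modify_code() under
 * stop_machine, so every other CPU is parked while kernel text is
 * rewritten and no CPU can be executing an instruction that is being
 * patched underneath it.
 */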
static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;
static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        if (ftrace_code_disable(p))
                                ftrace_update_cnt++;
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}
static void ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled))
                return;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                ftraced_iteration_counter++;
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                wake_up_interruptible(&ftraced_waiters);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}
enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FAILED) ||
                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||
                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}
static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                        buff[i] = 0;
                                        break;
                                }
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}
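
/*
 * Wildcard examples for the parser above (illustrative patterns):
 *
 *	"schedule"	MATCH_FULL		only "schedule"
 *	"sys_*"		MATCH_FRONT_ONLY	"sys_open", "sys_read", ...
 *	"*_lock"	MATCH_END_ONLY		names ending in "_lock"
 *	"*idle*"	MATCH_MIDDLE_ONLY	any name containing "idle"
 */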
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}
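
/*
 * Example call from tracer code (illustrative; "schedule" is just a
 * sample pattern):
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * resets any existing filter and then traces only schedule().
 */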
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};
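
/*
 * These three file_operations back the files created in the tracing
 * debugfs directory by ftrace_init_debugfs() below. Typical use from
 * userspace (illustrative shell session):
 *
 *	cat available_filter_functions
 *	echo 'sys_*'  > set_ftrace_filter
 *	echo schedule > set_ftrace_notrace
 */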
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
        unsigned long last_counter;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftraced_lock);
        last_counter = ftraced_iteration_counter;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftraced_waiters, &wait);

        if (unlikely(!ftraced_task)) {
                ret = -ENODEV;
                goto out;
        }

        do {
                mutex_unlock(&ftraced_lock);
                wake_up_process(ftraced_task);
                schedule();
                mutex_lock(&ftraced_lock);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
        } while (last_counter == ftraced_iteration_counter);

 out:
        mutex_unlock(&ftraced_lock);
        remove_wait_queue(&ftraced_waiters, &wait);
        set_current_state(TASK_RUNNING);

        return ret;
}
static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}
static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        return 0;
}

fs_initcall(ftrace_init_debugfs);
static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;

        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else /* !CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates. It is used when something
 * has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
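
/*
 * Minimal registration sketch (illustrative; my_trace_func and my_ops
 * are hypothetical names, not part of this file):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		... record ip and parent_ip somewhere safe ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * As the note above warns, everything my_trace_func calls must also
 * be notrace, or the tracer recurses into itself.
 */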
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}