/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

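/*
 * Handler installed as ftrace_trace_function when more than one
 * ftrace_ops is registered: walk ftrace_list and call each handler
 * in turn.
 */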
void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all callers see the change.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should never be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

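/*
 * Command bits passed to __ftrace_modify_code() through
 * ftrace_run_update_code(); they are applied under stop_machine().
 */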
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;

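/* call sites seen by ftrace_record_ip() but not yet converted by ftraced */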
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

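/*
 * dyn_ftrace records live in a singly linked list of pages; each page
 * is filled in order and a new page is chained on when the current
 * one is full.
 */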
struct ftrace_page {
        struct ftrace_page      *next;
        int                     index;
        struct dyn_ftrace       records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

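/*
 * Freed records are chained through their ip field onto
 * ftrace_free_records, where ftrace_alloc_dyn_node() can reuse them.
 */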
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

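/*
 * Called from the arch mcount stub for every traced function entry:
 * hash the call-site ip and record it once for the ftraced daemon to
 * convert. Recursion is fended off with a per-cpu counter instead of
 * a lock.
 */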
static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /* We simply need to protect against recursion */
        __get_cpu_var(ftrace_shutdown_disable_cpu)++;
        if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There is a slight race here: ftraced may have updated the
         * hash and reset it before we took the lock. If the call site
         * is already converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        __get_cpu_var(ftrace_shutdown_disable_cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip;
        int failed;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                unsigned long fl;
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0))
                        return;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable)
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        failed = ftrace_modify_code(ip, old, new);
        if (failed) {
                unsigned long key;
                /* It is possible that the function hasn't been converted yet */
                key = hash_long(ip, FTRACE_HASHBITS);
                if (!ftrace_ip_in_hash(ip, key)) {
                        rec->flags |= FTRACE_FL_FAILED;
                        ftrace_free_rec(rec);
                }
        }
}

static void ftrace_replace_code(int enable)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int i;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        __ftrace_replace_code(rec, old, new, enable);
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                ftrace_free_rec(rec);
        }
}

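/*
 * Runs with every CPU halted by stop_machine(), so the kernel text
 * can be patched without another CPU executing it mid-update.
 */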
static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

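/*
 * ftraced_suspend counts the users of function tracing: the first
 * ftrace_startup() enables the recorded call sites and the last
 * ftrace_shutdown() disables them again.
 */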
static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p);
                        ftrace_update_cnt++;
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}

static void ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled))
                return;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

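/*
 * The ftraced daemon wakes up once a second; when new call sites have
 * been recorded (ftraced_trigger set) it converts them to nops under
 * stop_machine() and then wakes any ftrace_force_update() waiters.
 */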
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                pr_info("hm, dftrace overflow: %lu change%s"
                                         " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                /* reset after reporting, so the total printed
                                   is the count that actually overflowed */
                                ftrace_update_tot_cnt = 0;
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                ftraced_iteration_counter++;
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                wake_up_interruptible(&ftraced_waiters);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

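/* per-open state behind the seq_file interface of the debugfs files below */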
struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FAILED) ||
                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static void ftrace_filter_reset(void)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_filter_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset();

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = FTRACE_ITER_FILTER;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

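/*
 * Wildcard forms understood by ftrace_match(): "foo*" matches the
 * front of a symbol name, "*foo" the end, "*foo*" anywhere in the
 * middle; a bare name must match in full.
 */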
enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

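/*
 * Filter patterns may arrive split across several write()s, so a
 * partial token is kept in iter->buffer and flagged with
 * FTRACE_ITER_CONT until terminating whitespace (or the final
 * release of the file) completes it.
 */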
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)       /* cnt is a size_t and can never be negative */
                return 0;

        mutex_lock(&ftrace_filter_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_filter_lock);
        if (reset)
                ftrace_filter_reset();
        if (buf)
                ftrace_match(buf, len);
        mutex_unlock(&ftrace_filter_lock);
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_filter_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_filter_lock);
        return 0;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_filter_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
        unsigned long last_counter;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftraced_lock);
        last_counter = ftraced_iteration_counter;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftraced_waiters, &wait);

        if (unlikely(!ftraced_task)) {
                ret = -ENODEV;
                goto out;
        }

        do {
                mutex_unlock(&ftraced_lock);
                wake_up_process(ftraced_task);
                schedule();
                mutex_lock(&ftraced_lock);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
        } while (last_counter == ftraced_iteration_counter);

 out:
        mutex_unlock(&ftraced_lock);
        remove_wait_queue(&ftraced_waiters, &wait);
        set_current_state(TASK_RUNNING);

        return ret;
}

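/*
 * Called from ftrace_kill(): disable every call site, stop the daemon,
 * and leave ftraced_suspend at -1 so the call sites cannot simply be
 * re-enabled.
 */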
static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure: if an anomaly is detected, calling this
 * function keeps ftrace from making any further modifications or
 * updates. It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}