/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

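/*
 * Call every entry on ftrace_list.  The list is walked locklessly:
 * writers publish a fully initialized ops (see the smp_wmb() in
 * __register_ftrace_function()) before linking it in, and the
 * read_barrier_depends() below pairs with that on architectures
 * (notably alpha) where data-dependent reads can be reordered.
 */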
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between this call and the moment
 * all CPUs stop calling the old function, since the pointer
 * update is not synchronized with the callers.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Must not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

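/*
 * Unlink @ops from ftrace_list and retarget ftrace_trace_function
 * accordingly.  Returns 0 on success, -1 if @ops was not found.
 */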
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

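/*
 * Commands passed to __ftrace_modify_code() under stop_machine();
 * they are bit flags and may be OR'd together.
 */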
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        int                     index;
        struct dyn_ftrace       records[];
} __attribute__((packed));

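/*
 * Records are carved out of whole pages.  As a rough worked example
 * (sizes vary by architecture and config): with 4096-byte pages, a
 * 12-byte packed header and a 32-byte struct dyn_ftrace, each page
 * would hold (4096 - 12) / 32 = 127 records.
 */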
#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

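/*
 * Called from the mcount stub on every function entry while recording
 * is active.  A per-CPU counter serves purely as a recursion guard,
 * and a hash keyed on the instruction pointer ensures each call site
 * is recorded only once; the ftraced daemon converts the sites later.
 */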
static void notrace
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;

        if (!ftrace_enabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /* We simply need to protect against recursion */
        __get_cpu_var(ftrace_shutdown_disable_cpu)++;
        if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race that the ftraced will update the
         * hash and reset here. If it is already converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        __get_cpu_var(ftrace_shutdown_disable_cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip;
        int failed;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                unsigned long fl;
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0))
                        return;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {
                if (enable)
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        failed = ftrace_modify_code(ip, old, new);
        if (failed)
                rec->flags |= FTRACE_FL_FAILED;
}

static void notrace ftrace_replace_code(int enable)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int i;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        __ftrace_replace_code(rec, old, new, enable);
                }
        }
}

static notrace void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed)
                rec->flags |= FTRACE_FL_FAILED;
}

static int notrace __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

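/*
 * Text patching must not race with any CPU executing the patched
 * sites, so all modifications run from __ftrace_modify_code() with
 * every other CPU halted by stop_machine_run().
 */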
static void notrace ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
        int command = 0;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
        int command = 0;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p);
                        ftrace_update_cnt++;
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}

static void notrace ftrace_update_code(void)
{
        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

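/*
 * The ftraced daemon: wakes up once a second and, when new call
 * sites have been recorded, ftrace is enabled and no tracer
 * currently has the calls enabled, patches the new sites to NOPs
 * via ftrace_update_code().  Waiters on ftraced_waiters (see
 * ftrace_force_update()) are woken after every iteration.
 */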
static int notrace ftraced(void *ignore)
{
        unsigned long usecs;

        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                /* check once a second */
                schedule_timeout(HZ);

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                         " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                ftraced_iteration_counter++;
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                wake_up_interruptible(&ftraced_waiters);

                ftrace_shutdown_replenish();

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -ENOMEM;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

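/*
 * seq_file iteration over every recorded call site.  t_next() walks
 * the ftrace_page list linearly, skipping records that faulted and,
 * when FTRACE_ITER_FILTER is set, records not marked for filtering.
 */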
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FAILED) ||
                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

static int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static void notrace ftrace_filter_reset(void)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_filter_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset();

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = FTRACE_ITER_FILTER;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

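/*
 * Filter pattern types.  ftrace_match() understands a single '*'
 * wildcard:
 *
 *   "foo"    -> MATCH_FULL         exact match
 *   "foo*"   -> MATCH_FRONT_ONLY   functions starting with "foo"
 *   "*foo"   -> MATCH_END_ONLY     functions ending with "foo"
 *   "*foo*"  -> MATCH_MIDDLE_ONLY  functions containing "foo"
 */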
enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void notrace
ftrace_match(unsigned char *buff, int len)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

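/*
 * Accept whitespace-separated filter patterns from user space, e.g.:
 *
 *   echo 'sys_nanosleep hrtimer_*' > set_ftrace_filter
 *
 * A write may end in the middle of a token; FTRACE_ITER_CONT marks
 * the buffered partial pattern so the next write (or the final
 * release) completes it.
 */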
static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_filter_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        /* only skip leading space when not continuing a partial token */
        if (!(iter->flags & FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        mutex_lock(&ftrace_filter_lock);
        if (reset)
                ftrace_filter_reset();
        if (buf)
                ftrace_match(buf, len);
        mutex_unlock(&ftrace_filter_lock);
}

static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_filter_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_filter_lock);
        return 0;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_filter_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
        unsigned long last_counter;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        if (!ftraced_task)
                return -ENODEV;

        mutex_lock(&ftraced_lock);
        last_counter = ftraced_iteration_counter;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftraced_waiters, &wait);

        do {
                mutex_unlock(&ftraced_lock);
                wake_up_process(ftraced_task);
                schedule();
                mutex_lock(&ftraced_lock);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
        } while (last_counter == ftraced_iteration_counter);

        mutex_unlock(&ftraced_lock);
        remove_wait_queue(&ftraced_waiters, &wait);
        set_current_state(TASK_RUNNING);

        return ret;
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init notrace ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;
        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                return addr;

        ret = ftrace_dyn_table_alloc();
        if (ret)
                return ret;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p))
                return PTR_ERR(p);

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

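/*
 * Example usage, a minimal sketch; my_trace_func and my_ops are
 * illustrative names, not part of this file.  The callback runs at
 * the entry of every traced function and must itself be notrace:
 *
 *   static notrace void my_trace_func(unsigned long ip,
 *                                     unsigned long parent_ip)
 *   {
 *   }
 *
 *   static struct ftrace_ops my_ops __read_mostly = {
 *           .func = my_trace_func,
 *   };
 *
 *   register_ftrace_function(&my_ops);
 */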
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}