/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a switch to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

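/*
 * Note (added for clarity): ftrace_list above is a singly linked list of
 * registered ftrace_ops, terminated by the ftrace_list_end stub rather
 * than by NULL.  The trace callback walks it locklessly, which is why
 * updates in __register_ftrace_function() are ordered with smp_wmb().
 */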
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag before in-flight callers
 * notice the change.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included in the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

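/*
 * The walk below uses a pointer-to-pointer (**p) so the matching entry
 * can be unlinked without special-casing the list head (note added for
 * clarity).
 */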
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, done by the callers themselves at
 * run time rather than recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * it mistaken for a call site when a reference to it is read
 * while parsing the objcopy output of the text section.  Use a
 * variable to hold it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

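/*
 * For scale (illustrative only, it depends on the arch and on
 * sizeof(struct dyn_ftrace)): with 4K pages this works out to roughly
 * a hundred-plus records per page.
 */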
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

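/*
 * Freed records are chained through their ip field (the record is no
 * longer live, so ip doubles as the free-list next pointer) and are
 * flagged FTRACE_FL_FREE; ftrace_alloc_dyn_node() reuses them first.
 */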
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

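/*
 * An enabled call site is patched to call the arch's ftrace_caller
 * trampoline at FTRACE_ADDR; a disabled site is patched back to a nop
 * (note added for clarity).
 */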
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

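/*
 * ftraced_suspend counts ftrace_startup()/ftrace_shutdown() pairs:
 * call sites are patched in when it goes 0 -> 1 and patched back out
 * when it drops to 0.
 */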
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %lu hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

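/*
 * Roughly (note added for clarity): after a seek (*pos != iter->pos),
 * t_start() replays t_next() from the beginning until it reaches the
 * requested position; otherwise it resumes from the cached iterator
 * state.
 */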
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

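/*
 * Patterns accepted below (note added for clarity):
 *   "func"   - MATCH_FULL, exact name
 *   "func*"  - MATCH_FRONT_ONLY, prefix match
 *   "*func"  - MATCH_END_ONLY, suffix match
 *   "*func*" - MATCH_MIDDLE_ONLY, substring match
 */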
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is a size_t and can never be negative */
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

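/*
 * E.g. (illustrative): ftrace_set_filter("sys_*", 5, 1) resets all
 * filters and then traces only functions whose names start with "sys_".
 */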
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
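/*
 * Without CONFIG_FTRACE_MCOUNT_RECORD the mcount call sites are only
 * discovered at run time, so the ftraced kernel thread below wakes up
 * once a second and converts any newly recorded sites via
 * ftrace_update_code() (which uses stop_machine()).
 */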
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing any
 * more modifications or updates. It is used when something has
 * gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

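/*
 * Minimal usage sketch (hypothetical names, for illustration only);
 * pair each register with unregister_ftrace_function() below:
 *
 *	static void notrace my_trace(unsigned long ip,
 *				     unsigned long parent_ip)
 *	{
 *		do_something_notrace_safe(ip, parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */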
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}