1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
30
31 #include <asm/ftrace.h>
32
33 #include "trace.h"
34
35 /* ftrace_enabled is a method to turn ftrace on or off */
36 int ftrace_enabled __read_mostly;
37 static int last_ftrace_enabled;
38
39 /*
40  * Since MCOUNT_ADDR may point to mcount itself, we do not want
41  * to get it confused with a reference read from the code while we
42  * are parsing the objcopy output of the text section. Use a variable for
43  * it instead.
44  */
45 static unsigned long mcount_addr = MCOUNT_ADDR;
46
47 /*
48  * ftrace_disabled is set when an anomaly is discovered.
49  * ftrace_disabled is much stronger than ftrace_enabled.
50  */
51 static int ftrace_disabled __read_mostly;
52
53 static DEFINE_SPINLOCK(ftrace_lock);
54 static DEFINE_MUTEX(ftrace_sysctl_lock);
55
56 static struct ftrace_ops ftrace_list_end __read_mostly =
57 {
58         .func = ftrace_stub,
59 };
60
61 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
62 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
63
64 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
65 {
66         struct ftrace_ops *op = ftrace_list;
67
68         /* in case someone actually ports this to alpha! */
69         read_barrier_depends();
70
71         while (op != &ftrace_list_end) {
72                 /* silly alpha */
73                 read_barrier_depends();
74                 op->func(ip, parent_ip);
75                 op = op->next;
76         }
77 }
78
79 /**
80  * clear_ftrace_function - reset the ftrace function
81  *
82  * This NULLs the ftrace function and in essence stops
83  * tracing.  There may be some lag before all CPUs stop calling the old function.
84  */
85 void clear_ftrace_function(void)
86 {
87         ftrace_trace_function = ftrace_stub;
88 }
89
90 static int __register_ftrace_function(struct ftrace_ops *ops)
91 {
92         /* should not be called from interrupt context */
93         spin_lock(&ftrace_lock);
94
95         ops->next = ftrace_list;
96         /*
97          * We are entering ops into the ftrace_list but another
98          * CPU might be walking that list. We need to make sure
99          * the ops->next pointer is valid before another CPU sees
100          * the ops pointer linked into the ftrace_list.
101          */
102         smp_wmb();
103         ftrace_list = ops;
104
105         if (ftrace_enabled) {
106                 /*
107                  * For one func, simply call it directly.
108                  * For more than one func, call the chain.
109                  */
110                 if (ops->next == &ftrace_list_end)
111                         ftrace_trace_function = ops->func;
112                 else
113                         ftrace_trace_function = ftrace_list_func;
114         }
115
116         spin_unlock(&ftrace_lock);
117
118         return 0;
119 }
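/*
 * A sketch of the ordering the above relies on: the smp_wmb() pairs
 * with the read_barrier_depends() in ftrace_list_func().  A reader
 * that observes the new ftrace_list head is then guaranteed to also
 * observe the ops->next it was initialized with:
 *
 *     CPU 0 (register)              CPU 1 (ftrace_list_func)
 *     ------------------------      ------------------------
 *     ops->next = ftrace_list;      op = ftrace_list;
 *     smp_wmb();                    read_barrier_depends();
 *     ftrace_list = ops;            op->func(ip, parent_ip);
 *                                   op = op->next;    (valid)
 */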
120
121 static int __unregister_ftrace_function(struct ftrace_ops *ops)
122 {
123         struct ftrace_ops **p;
124         int ret = 0;
125
126         /* should not be called from interrupt context */
127         spin_lock(&ftrace_lock);
128
129         /*
130          * If we are removing the last function, then simply point
131          * to the ftrace_stub.
132          */
133         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
134                 ftrace_trace_function = ftrace_stub;
135                 ftrace_list = &ftrace_list_end;
136                 goto out;
137         }
138
139         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
140                 if (*p == ops)
141                         break;
142
143         if (*p != ops) {
144                 ret = -1;
145                 goto out;
146         }
147
148         *p = (*p)->next;
149
150         if (ftrace_enabled) {
151                 /* If we only have one func left, then call that directly */
152                 if (ftrace_list == &ftrace_list_end ||
153                     ftrace_list->next == &ftrace_list_end)
154                         ftrace_trace_function = ftrace_list->func;
155         }
156
157  out:
158         spin_unlock(&ftrace_lock);
159
160         return ret;
161 }
162
163 #ifdef CONFIG_DYNAMIC_FTRACE
164
165 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
166 /*
167  * The hash lock is only needed when the recording of the mcount
168  * callers is dynamic. That is, when call sites are recorded at run
169  * time by the callers themselves rather than at compile time.
170  */
171 static DEFINE_SPINLOCK(ftrace_hash_lock);
172 #define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
173 #define ftrace_hash_unlock(flags) spin_unlock_irqrestore(&ftrace_hash_lock, flags)
174 #else
175 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
176 #define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
177 #define ftrace_hash_unlock(flags) do { } while (0)
178 #endif
179
180 static struct task_struct *ftraced_task;
181
182 enum {
183         FTRACE_ENABLE_CALLS             = (1 << 0),
184         FTRACE_DISABLE_CALLS            = (1 << 1),
185         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
186         FTRACE_ENABLE_MCOUNT            = (1 << 3),
187         FTRACE_DISABLE_MCOUNT           = (1 << 4),
188 };
189
190 static int ftrace_filtered;
191 static int tracing_on;
192 static int frozen_record_count;
193
194 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
195
196 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
197
198 static DEFINE_MUTEX(ftraced_lock);
199 static DEFINE_MUTEX(ftrace_regex_lock);
200
201 struct ftrace_page {
202         struct ftrace_page      *next;
203         unsigned long           index;
204         struct dyn_ftrace       records[];
205 };
206
207 #define ENTRIES_PER_PAGE \
208   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
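/*
 * A rough worked example, assuming a 4K PAGE_SIZE, 64-bit pointers and
 * a struct dyn_ftrace of an ip, flags and an hlist_node (about 32
 * bytes): (4096 - 16) / 32 gives roughly 127 records per page, so the
 * NR_TO_INIT estimate below needs on the order of 80 pages.
 */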
209
210 /* estimate from running different kernels */
211 #define NR_TO_INIT              10000
212
213 static struct ftrace_page       *ftrace_pages_start;
214 static struct ftrace_page       *ftrace_pages;
215
216 static int ftraced_trigger;
217 static int ftraced_suspend;
218 static int ftraced_stop;
219
220 static int ftrace_record_suspend;
221
222 static struct dyn_ftrace *ftrace_free_records;
223
224
225 #ifdef CONFIG_KPROBES
226 static inline void freeze_record(struct dyn_ftrace *rec)
227 {
228         if (!(rec->flags & FTRACE_FL_FROZEN)) {
229                 rec->flags |= FTRACE_FL_FROZEN;
230                 frozen_record_count++;
231         }
232 }
233
234 static inline void unfreeze_record(struct dyn_ftrace *rec)
235 {
236         if (rec->flags & FTRACE_FL_FROZEN) {
237                 rec->flags &= ~FTRACE_FL_FROZEN;
238                 frozen_record_count--;
239         }
240 }
241
242 static inline int record_frozen(struct dyn_ftrace *rec)
243 {
244         return rec->flags & FTRACE_FL_FROZEN;
245 }
246 #else
247 # define freeze_record(rec)                     ({ 0; })
248 # define unfreeze_record(rec)                   ({ 0; })
249 # define record_frozen(rec)                     ({ 0; })
250 #endif /* CONFIG_KPROBES */
251
252 int skip_trace(unsigned long ip)
253 {
254         unsigned long fl;
255         struct dyn_ftrace *rec;
256         struct hlist_node *t;
257         struct hlist_head *head;
258
259         if (frozen_record_count == 0)
260                 return 0;
261
262         head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
263         hlist_for_each_entry_rcu(rec, t, head, node) {
264                 if (rec->ip == ip) {
265                         if (record_frozen(rec)) {
266                                 if (rec->flags & FTRACE_FL_FAILED)
267                                         return 1;
268
269                                 if (!(rec->flags & FTRACE_FL_CONVERTED))
270                                         return 1;
271
272                                 if (!tracing_on || !ftrace_enabled)
273                                         return 1;
274
275                                 if (ftrace_filtered) {
276                                         fl = rec->flags & (FTRACE_FL_FILTER |
277                                                            FTRACE_FL_NOTRACE);
278                                         if (!fl || (fl & FTRACE_FL_NOTRACE))
279                                                 return 1;
280                                 }
281                         }
282                         break;
283                 }
284         }
285
286         return 0;
287 }
288
289 static inline int
290 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
291 {
292         struct dyn_ftrace *p;
293         struct hlist_node *t;
294         int found = 0;
295
296         hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
297                 if (p->ip == ip) {
298                         found = 1;
299                         break;
300                 }
301         }
302
303         return found;
304 }
305
306 static inline void
307 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
308 {
309         hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
310 }
311
312 /* called from kstop_machine */
313 static inline void ftrace_del_hash(struct dyn_ftrace *node)
314 {
315         hlist_del(&node->node);
316 }
317
318 static void ftrace_free_rec(struct dyn_ftrace *rec)
319 {
320         rec->ip = (unsigned long)ftrace_free_records;
321         ftrace_free_records = rec;
322         rec->flags |= FTRACE_FL_FREE;
323 }
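/*
 * Note the trick above: a freed record is threaded onto a singly
 * linked free list by reusing its ->ip field as the next pointer, so
 * the free list costs no extra storage.  A stand-alone sketch of the
 * same idiom (hypothetical types, not kernel code):
 */
struct rec { unsigned long ip; };

static struct rec *rec_free_list;

static void rec_free(struct rec *r)
{
        r->ip = (unsigned long)rec_free_list;   /* ip doubles as the next pointer */
        rec_free_list = r;
}

static struct rec *rec_alloc(void)
{
        struct rec *r = rec_free_list;

        if (r)
                rec_free_list = (struct rec *)r->ip;    /* pop the head */
        return r;
}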
324
325 void ftrace_release(void *start, unsigned long size)
326 {
327         struct dyn_ftrace *rec;
328         struct ftrace_page *pg;
329         unsigned long s = (unsigned long)start;
330         unsigned long e = s + size;
331         int i;
332
333         if (ftrace_disabled || !start)
334                 return;
335
336         /* should not be called from interrupt context */
337         spin_lock(&ftrace_lock);
338
339         for (pg = ftrace_pages_start; pg; pg = pg->next) {
340                 for (i = 0; i < pg->index; i++) {
341                         rec = &pg->records[i];
342
343                         if ((rec->ip >= s) && (rec->ip < e))
344                                 ftrace_free_rec(rec);
345                 }
346         }
347         spin_unlock(&ftrace_lock);
348
349 }
350
351 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
352 {
353         struct dyn_ftrace *rec;
354
355         /* First check for freed records */
356         if (ftrace_free_records) {
357                 rec = ftrace_free_records;
358
359                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
360                         WARN_ON_ONCE(1);
361                         ftrace_free_records = NULL;
362                         ftrace_disabled = 1;
363                         ftrace_enabled = 0;
364                         return NULL;
365                 }
366
367                 ftrace_free_records = (void *)rec->ip;
368                 memset(rec, 0, sizeof(*rec));
369                 return rec;
370         }
371
372         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
373                 if (!ftrace_pages->next)
374                         return NULL;
375                 ftrace_pages = ftrace_pages->next;
376         }
377
378         return &ftrace_pages->records[ftrace_pages->index++];
379 }
380
381 static void
382 ftrace_record_ip(unsigned long ip)
383 {
384         struct dyn_ftrace *node;
385         unsigned long flags;
386         unsigned long key;
387         int resched;
388         int cpu;
389
390         if (!ftrace_enabled || ftrace_disabled)
391                 return;
392
393         resched = need_resched();
394         preempt_disable_notrace();
395
396         /*
397          * We simply need to protect against recursion.
398          * Use the raw version of smp_processor_id and not
399          * __get_cpu_var which can call debug hooks that can
400          * cause a recursive crash here.
401          */
402         cpu = raw_smp_processor_id();
403         per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
404         if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
405                 goto out;
406
407         if (unlikely(ftrace_record_suspend))
408                 goto out;
409
410         key = hash_long(ip, FTRACE_HASHBITS);
411
412         WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
413
414         if (ftrace_ip_in_hash(ip, key))
415                 goto out;
416
417         ftrace_hash_lock(flags);
418
419         /* This ip may have hit the hash before the lock */
420         if (ftrace_ip_in_hash(ip, key))
421                 goto out_unlock;
422
423         node = ftrace_alloc_dyn_node(ip);
424         if (!node)
425                 goto out_unlock;
426
427         node->ip = ip;
428
429         ftrace_add_hash(node, key);
430
431         ftraced_trigger = 1;
432
433  out_unlock:
434         ftrace_hash_unlock(flags);
435  out:
436         per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
437
438         /* prevent recursion with scheduler */
439         if (resched)
440                 preempt_enable_no_resched_notrace();
441         else
442                 preempt_enable_notrace();
443 }
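/*
 * The per-CPU counter above is purely a recursion guard: if
 * ftrace_record_ip() is re-entered on the same CPU (for instance from
 * a traced function that the hash code itself calls), the nested call
 * sees a count other than 1 and bails out.  A minimal user-space
 * sketch of the same pattern (hypothetical, with a thread-local
 * standing in for the per-CPU variable):
 */
static __thread int in_hook;

static void record_hook(unsigned long ip)
{
        if (in_hook++)          /* nested entry: do nothing */
                goto out;
        /* ... safe to do the real work here ... */
 out:
        in_hook--;
}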
444
445 #define FTRACE_ADDR ((long)(ftrace_caller))
446
447 static int
448 __ftrace_replace_code(struct dyn_ftrace *rec,
449                       unsigned char *old, unsigned char *new, int enable)
450 {
451         unsigned long ip, fl;
452
453         ip = rec->ip;
454
455         if (ftrace_filtered && enable) {
456                 /*
457                  * If filtering is on:
458                  *
459                  * If this record is set to be filtered and
460                  * is enabled then do nothing.
461                  *
462                  * If this record is set to be filtered and
463                  * it is not enabled, enable it.
464                  *
465                  * If this record is not set to be filtered
466                  * and it is not enabled do nothing.
467                  *
468                  * If this record is set not to trace then
469                  * do nothing.
470                  *
471                  * If this record is set not to trace and
472                  * it is enabled then disable it.
473                  *
474                  * If this record is not set to be filtered and
475                  * it is enabled, disable it.
476                  */
477
478                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
479                                    FTRACE_FL_ENABLED);
480
481                 if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
482                     (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
483                     !fl || (fl == FTRACE_FL_NOTRACE))
484                         return 0;
485
486                 /*
487                  * If it is enabled disable it,
488                  * otherwise enable it!
489                  */
490                 if (fl & FTRACE_FL_ENABLED) {
491                         /* swap new and old */
492                         new = old;
493                         old = ftrace_call_replace(ip, FTRACE_ADDR);
494                         rec->flags &= ~FTRACE_FL_ENABLED;
495                 } else {
496                         new = ftrace_call_replace(ip, FTRACE_ADDR);
497                         rec->flags |= FTRACE_FL_ENABLED;
498                 }
499         } else {
500
501                 if (enable) {
502                         /*
503                          * If this record is set not to trace and is
504                          * not enabled, do nothing.
505                          */
506                         fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
507                         if (fl == FTRACE_FL_NOTRACE)
508                                 return 0;
509
510                         new = ftrace_call_replace(ip, FTRACE_ADDR);
511                 } else
512                         old = ftrace_call_replace(ip, FTRACE_ADDR);
513
514                 if (enable) {
515                         if (rec->flags & FTRACE_FL_ENABLED)
516                                 return 0;
517                         rec->flags |= FTRACE_FL_ENABLED;
518                 } else {
519                         if (!(rec->flags & FTRACE_FL_ENABLED))
520                                 return 0;
521                         rec->flags &= ~FTRACE_FL_ENABLED;
522                 }
523         }
524
525         return ftrace_modify_code(ip, old, new);
526 }
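/*
 * The filter cases above, condensed into a table
 * (F = FTRACE_FL_FILTER, N = FTRACE_FL_NOTRACE, E = FTRACE_FL_ENABLED):
 *
 *     F N E   action
 *     0 0 0   nothing
 *     0 0 1   disable the call site
 *     0 1 0   nothing
 *     0 1 1   disable the call site
 *     1 0 0   enable the call site
 *     1 0 1   nothing (already enabled)
 *     1 1 0   nothing (notrace set)
 *     1 1 1   disable the call site (notrace overrides filter)
 */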
527
528 static void ftrace_replace_code(int enable)
529 {
530         int i, failed;
531         unsigned char *new = NULL, *old = NULL;
532         struct dyn_ftrace *rec;
533         struct ftrace_page *pg;
534
535         if (enable)
536                 old = ftrace_nop_replace();
537         else
538                 new = ftrace_nop_replace();
539
540         for (pg = ftrace_pages_start; pg; pg = pg->next) {
541                 for (i = 0; i < pg->index; i++) {
542                         rec = &pg->records[i];
543
544                         /* don't modify code that has already faulted */
545                         if (rec->flags & FTRACE_FL_FAILED)
546                                 continue;
547
548                         /* ignore updates to this record's mcount site */
549                         if (get_kprobe((void *)rec->ip)) {
550                                 freeze_record(rec);
551                                 continue;
552                         } else {
553                                 unfreeze_record(rec);
554                         }
555
556                         failed = __ftrace_replace_code(rec, old, new, enable);
557                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
558                                 rec->flags |= FTRACE_FL_FAILED;
559                                 if ((system_state == SYSTEM_BOOTING) ||
560                                     !core_kernel_text(rec->ip)) {
561                                         ftrace_del_hash(rec);
562                                         ftrace_free_rec(rec);
563                                 }
564                         }
565                 }
566         }
567 }
568
569 static void ftrace_shutdown_replenish(void)
570 {
571         if (ftrace_pages->next)
572                 return;
573
574         /* allocate another page */
575         ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
576 }
577
578 static int
579 ftrace_code_disable(struct dyn_ftrace *rec)
580 {
581         unsigned long ip;
582         unsigned char *nop, *call;
583         int failed;
584
585         ip = rec->ip;
586
587         nop = ftrace_nop_replace();
588         call = ftrace_call_replace(ip, mcount_addr);
589
590         failed = ftrace_modify_code(ip, call, nop);
591         if (failed) {
592                 rec->flags |= FTRACE_FL_FAILED;
593                 return 0;
594         }
595         return 1;
596 }
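/*
 * What ftrace_call_replace()/ftrace_nop_replace() return is
 * arch-specific.  As an illustrative sketch of the idea on x86
 * (simplified; the real sequences live in the arch code): both the
 * call and the NOP are five bytes, so the mcount site can be patched
 * in place.
 */
static unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; /* 5-byte NOP */

static void make_call_insn(unsigned char buf[5],
                           unsigned long ip, unsigned long target)
{
        buf[0] = 0xe8;          /* CALL rel32 */
        /* the displacement is relative to the next instruction (ip + 5) */
        *(int *)(buf + 1) = (int)(target - (ip + 5));
}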
597
598 static int __ftrace_update_code(void *ignore);
599
600 static int __ftrace_modify_code(void *data)
601 {
602         unsigned long addr;
603         int *command = data;
604
605         if (*command & FTRACE_ENABLE_CALLS) {
606                 /*
607                  * Update any recorded ips now that we have the
608                  * machine stopped
609                  */
610                 __ftrace_update_code(NULL);
611                 ftrace_replace_code(1);
612                 tracing_on = 1;
613         } else if (*command & FTRACE_DISABLE_CALLS) {
614                 ftrace_replace_code(0);
615                 tracing_on = 0;
616         }
617
618         if (*command & FTRACE_UPDATE_TRACE_FUNC)
619                 ftrace_update_ftrace_func(ftrace_trace_function);
620
621         if (*command & FTRACE_ENABLE_MCOUNT) {
622                 addr = (unsigned long)ftrace_record_ip;
623                 ftrace_mcount_set(&addr);
624         } else if (*command & FTRACE_DISABLE_MCOUNT) {
625                 addr = (unsigned long)ftrace_stub;
626                 ftrace_mcount_set(&addr);
627         }
628
629         return 0;
630 }
631
632 static void ftrace_run_update_code(int command)
633 {
634         stop_machine(__ftrace_modify_code, &command, NULL);
635 }
636
637 void ftrace_disable_daemon(void)
638 {
639         /* Stop the daemon from calling kstop_machine */
640         mutex_lock(&ftraced_lock);
641         ftraced_stop = 1;
642         mutex_unlock(&ftraced_lock);
643
644         ftrace_force_update();
645 }
646
647 void ftrace_enable_daemon(void)
648 {
649         mutex_lock(&ftraced_lock);
650         ftraced_stop = 0;
651         mutex_unlock(&ftraced_lock);
652
653         ftrace_force_update();
654 }
655
656 static ftrace_func_t saved_ftrace_func;
657
658 static void ftrace_startup(void)
659 {
660         int command = 0;
661
662         if (unlikely(ftrace_disabled))
663                 return;
664
665         mutex_lock(&ftraced_lock);
666         ftraced_suspend++;
667         if (ftraced_suspend == 1)
668                 command |= FTRACE_ENABLE_CALLS;
669
670         if (saved_ftrace_func != ftrace_trace_function) {
671                 saved_ftrace_func = ftrace_trace_function;
672                 command |= FTRACE_UPDATE_TRACE_FUNC;
673         }
674
675         if (!command || !ftrace_enabled)
676                 goto out;
677
678         ftrace_run_update_code(command);
679  out:
680         mutex_unlock(&ftraced_lock);
681 }
682
683 static void ftrace_shutdown(void)
684 {
685         int command = 0;
686
687         if (unlikely(ftrace_disabled))
688                 return;
689
690         mutex_lock(&ftraced_lock);
691         ftraced_suspend--;
692         if (!ftraced_suspend)
693                 command |= FTRACE_DISABLE_CALLS;
694
695         if (saved_ftrace_func != ftrace_trace_function) {
696                 saved_ftrace_func = ftrace_trace_function;
697                 command |= FTRACE_UPDATE_TRACE_FUNC;
698         }
699
700         if (!command || !ftrace_enabled)
701                 goto out;
702
703         ftrace_run_update_code(command);
704  out:
705         mutex_unlock(&ftraced_lock);
706 }
707
708 static void ftrace_startup_sysctl(void)
709 {
710         int command = FTRACE_ENABLE_MCOUNT;
711
712         if (unlikely(ftrace_disabled))
713                 return;
714
715         mutex_lock(&ftraced_lock);
716         /* Force update next time */
717         saved_ftrace_func = NULL;
718         /* ftraced_suspend is true if we want ftrace running */
719         if (ftraced_suspend)
720                 command |= FTRACE_ENABLE_CALLS;
721
722         ftrace_run_update_code(command);
723         mutex_unlock(&ftraced_lock);
724 }
725
726 static void ftrace_shutdown_sysctl(void)
727 {
728         int command = FTRACE_DISABLE_MCOUNT;
729
730         if (unlikely(ftrace_disabled))
731                 return;
732
733         mutex_lock(&ftraced_lock);
734         /* ftraced_suspend is true if ftrace is running */
735         if (ftraced_suspend)
736                 command |= FTRACE_DISABLE_CALLS;
737
738         ftrace_run_update_code(command);
739         mutex_unlock(&ftraced_lock);
740 }
741
742 static cycle_t          ftrace_update_time;
743 static unsigned long    ftrace_update_cnt;
744 unsigned long           ftrace_update_tot_cnt;
745
746 static int __ftrace_update_code(void *ignore)
747 {
748         int i, save_ftrace_enabled;
749         cycle_t start, stop;
750         struct dyn_ftrace *p;
751         struct hlist_node *t, *n;
752         struct hlist_head *head, temp_list;
753
754         /* Don't be recording funcs now */
755         ftrace_record_suspend++;
756         save_ftrace_enabled = ftrace_enabled;
757         ftrace_enabled = 0;
758
759         start = ftrace_now(raw_smp_processor_id());
760         ftrace_update_cnt = 0;
761
762         /* No locks needed, the machine is stopped! */
763         for (i = 0; i < FTRACE_HASHSIZE; i++) {
764                 INIT_HLIST_HEAD(&temp_list);
765                 head = &ftrace_hash[i];
766
767                 /* all CPUS are stopped, we are safe to modify code */
768                 hlist_for_each_entry_safe(p, t, n, head, node) {
769                         /* Skip over failed records which have not been
770                          * freed. */
771                         if (p->flags & FTRACE_FL_FAILED)
772                                 continue;
773
774                         /* Unconverted records are always at the head of the
775                          * hash bucket. Once we encounter a converted record,
776                          * simply skip over to the next bucket. Saves ftraced
777                          * some processor cycles (ftrace does its bit for
778                          * global warming :-p ). */
779                         if (p->flags & (FTRACE_FL_CONVERTED))
780                                 break;
781
782                         /* Ignore updates to this record's mcount site.
783                          * Reintroduce this record at the head of this
784                          * bucket to attempt to "convert" it again if
785                          * the kprobe on it is unregistered before the
786                          * next run. */
787                         if (get_kprobe((void *)p->ip)) {
788                                 ftrace_del_hash(p);
789                                 INIT_HLIST_NODE(&p->node);
790                                 hlist_add_head(&p->node, &temp_list);
791                                 freeze_record(p);
792                                 continue;
793                         } else {
794                                 unfreeze_record(p);
795                         }
796
797                         /* convert record (i.e, patch mcount-call with NOP) */
798                         if (ftrace_code_disable(p)) {
799                                 p->flags |= FTRACE_FL_CONVERTED;
800                                 ftrace_update_cnt++;
801                         } else {
802                                 if ((system_state == SYSTEM_BOOTING) ||
803                                     !core_kernel_text(p->ip)) {
804                                         ftrace_del_hash(p);
805                                         ftrace_free_rec(p);
806                                 }
807                         }
808                 }
809
810                 hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
811                         hlist_del(&p->node);
812                         INIT_HLIST_NODE(&p->node);
813                         hlist_add_head(&p->node, head);
814                 }
815         }
816
817         stop = ftrace_now(raw_smp_processor_id());
818         ftrace_update_time = stop - start;
819         ftrace_update_tot_cnt += ftrace_update_cnt;
820         ftraced_trigger = 0;
821
822         ftrace_enabled = save_ftrace_enabled;
823         ftrace_record_suspend--;
824
825         return 0;
826 }
827
828 static int ftrace_update_code(void)
829 {
830         if (unlikely(ftrace_disabled) ||
831             !ftrace_enabled || !ftraced_trigger)
832                 return 0;
833
834         stop_machine(__ftrace_update_code, NULL, NULL);
835
836         return 1;
837 }
838
839 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
840 {
841         struct ftrace_page *pg;
842         int cnt;
843         int i;
844
845         /* allocate a few pages */
846         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
847         if (!ftrace_pages_start)
848                 return -1;
849
850         /*
851          * Allocate a few more pages.
852          *
853          * TODO: have some parser search vmlinux before
854          *   final linking to find all calls to ftrace.
855          *   Then we can:
856          *    a) know how many pages to allocate.
857          *     and/or
858          *    b) set up the table then.
859          *
860          *  The dynamic code is still necessary for
861          *  modules.
862          */
863
864         pg = ftrace_pages = ftrace_pages_start;
865
866         cnt = num_to_init / ENTRIES_PER_PAGE;
867         pr_info("ftrace: allocating %ld hash entries in %d pages\n",
868                 num_to_init, cnt);
869
870         for (i = 0; i < cnt; i++) {
871                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
872
873                 /* If we fail, we'll try later anyway */
874                 if (!pg->next)
875                         break;
876
877                 pg = pg->next;
878         }
879
880         return 0;
881 }
882
883 enum {
884         FTRACE_ITER_FILTER      = (1 << 0),
885         FTRACE_ITER_CONT        = (1 << 1),
886         FTRACE_ITER_NOTRACE     = (1 << 2),
887         FTRACE_ITER_FAILURES    = (1 << 3),
888 };
889
890 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
891
892 struct ftrace_iterator {
893         loff_t                  pos;
894         struct ftrace_page      *pg;
895         unsigned                idx;
896         unsigned                flags;
897         unsigned char           buffer[FTRACE_BUFF_MAX+1];
898         unsigned                buffer_idx;
899         unsigned                filtered;
900 };
901
902 static void *
903 t_next(struct seq_file *m, void *v, loff_t *pos)
904 {
905         struct ftrace_iterator *iter = m->private;
906         struct dyn_ftrace *rec = NULL;
907
908         (*pos)++;
909
910         /* should not be called from interrupt context */
911         spin_lock(&ftrace_lock);
912  retry:
913         if (iter->idx >= iter->pg->index) {
914                 if (iter->pg->next) {
915                         iter->pg = iter->pg->next;
916                         iter->idx = 0;
917                         goto retry;
918                 }
919         } else {
920                 rec = &iter->pg->records[iter->idx++];
921                 if ((rec->flags & FTRACE_FL_FREE) ||
922
923                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
924                      (rec->flags & FTRACE_FL_FAILED)) ||
925
926                     ((iter->flags & FTRACE_ITER_FAILURES) &&
927                      !(rec->flags & FTRACE_FL_FAILED)) ||
928
929                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
930                      !(rec->flags & FTRACE_FL_NOTRACE))) {
931                         rec = NULL;
932                         goto retry;
933                 }
934         }
935         spin_unlock(&ftrace_lock);
936
937         iter->pos = *pos;
938
939         return rec;
940 }
941
942 static void *t_start(struct seq_file *m, loff_t *pos)
943 {
944         struct ftrace_iterator *iter = m->private;
945         void *p = NULL;
946         loff_t l = -1;
947
948         if (*pos != iter->pos) {
949                 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
950                         ;
951         } else {
952                 l = *pos;
953                 p = t_next(m, p, &l);
954         }
955
956         return p;
957 }
958
959 static void t_stop(struct seq_file *m, void *p)
960 {
961 }
962
963 static int t_show(struct seq_file *m, void *v)
964 {
965         struct dyn_ftrace *rec = v;
966         char str[KSYM_SYMBOL_LEN];
967
968         if (!rec)
969                 return 0;
970
971         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
972
973         seq_printf(m, "%s\n", str);
974
975         return 0;
976 }
977
978 static struct seq_operations show_ftrace_seq_ops = {
979         .start = t_start,
980         .next = t_next,
981         .stop = t_stop,
982         .show = t_show,
983 };
984
985 static int
986 ftrace_avail_open(struct inode *inode, struct file *file)
987 {
988         struct ftrace_iterator *iter;
989         int ret;
990
991         if (unlikely(ftrace_disabled))
992                 return -ENODEV;
993
994         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
995         if (!iter)
996                 return -ENOMEM;
997
998         iter->pg = ftrace_pages_start;
999         iter->pos = -1;
1000
1001         ret = seq_open(file, &show_ftrace_seq_ops);
1002         if (!ret) {
1003                 struct seq_file *m = file->private_data;
1004
1005                 m->private = iter;
1006         } else {
1007                 kfree(iter);
1008         }
1009
1010         return ret;
1011 }
1012
1013 int ftrace_avail_release(struct inode *inode, struct file *file)
1014 {
1015         struct seq_file *m = (struct seq_file *)file->private_data;
1016         struct ftrace_iterator *iter = m->private;
1017
1018         seq_release(inode, file);
1019         kfree(iter);
1020
1021         return 0;
1022 }
1023
1024 static int
1025 ftrace_failures_open(struct inode *inode, struct file *file)
1026 {
1027         int ret;
1028         struct seq_file *m;
1029         struct ftrace_iterator *iter;
1030
1031         ret = ftrace_avail_open(inode, file);
1032         if (!ret) {
1033                 m = (struct seq_file *)file->private_data;
1034                 iter = (struct ftrace_iterator *)m->private;
1035                 iter->flags = FTRACE_ITER_FAILURES;
1036         }
1037
1038         return ret;
1039 }
1040
1041
1042 static void ftrace_filter_reset(int enable)
1043 {
1044         struct ftrace_page *pg;
1045         struct dyn_ftrace *rec;
1046         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1047         unsigned i;
1048
1049         /* should not be called from interrupt context */
1050         spin_lock(&ftrace_lock);
1051         if (enable)
1052                 ftrace_filtered = 0;
1053         pg = ftrace_pages_start;
1054         while (pg) {
1055                 for (i = 0; i < pg->index; i++) {
1056                         rec = &pg->records[i];
1057                         if (rec->flags & FTRACE_FL_FAILED)
1058                                 continue;
1059                         rec->flags &= ~type;
1060                 }
1061                 pg = pg->next;
1062         }
1063         spin_unlock(&ftrace_lock);
1064 }
1065
1066 static int
1067 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1068 {
1069         struct ftrace_iterator *iter;
1070         int ret = 0;
1071
1072         if (unlikely(ftrace_disabled))
1073                 return -ENODEV;
1074
1075         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1076         if (!iter)
1077                 return -ENOMEM;
1078
1079         mutex_lock(&ftrace_regex_lock);
1080         if ((file->f_mode & FMODE_WRITE) &&
1081             !(file->f_flags & O_APPEND))
1082                 ftrace_filter_reset(enable);
1083
1084         if (file->f_mode & FMODE_READ) {
1085                 iter->pg = ftrace_pages_start;
1086                 iter->pos = -1;
1087                 iter->flags = enable ? FTRACE_ITER_FILTER :
1088                         FTRACE_ITER_NOTRACE;
1089
1090                 ret = seq_open(file, &show_ftrace_seq_ops);
1091                 if (!ret) {
1092                         struct seq_file *m = file->private_data;
1093                         m->private = iter;
1094                 } else
1095                         kfree(iter);
1096         } else
1097                 file->private_data = iter;
1098         mutex_unlock(&ftrace_regex_lock);
1099
1100         return ret;
1101 }
1102
1103 static int
1104 ftrace_filter_open(struct inode *inode, struct file *file)
1105 {
1106         return ftrace_regex_open(inode, file, 1);
1107 }
1108
1109 static int
1110 ftrace_notrace_open(struct inode *inode, struct file *file)
1111 {
1112         return ftrace_regex_open(inode, file, 0);
1113 }
1114
1115 static ssize_t
1116 ftrace_regex_read(struct file *file, char __user *ubuf,
1117                        size_t cnt, loff_t *ppos)
1118 {
1119         if (file->f_mode & FMODE_READ)
1120                 return seq_read(file, ubuf, cnt, ppos);
1121         else
1122                 return -EPERM;
1123 }
1124
1125 static loff_t
1126 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1127 {
1128         loff_t ret;
1129
1130         if (file->f_mode & FMODE_READ)
1131                 ret = seq_lseek(file, offset, origin);
1132         else
1133                 file->f_pos = ret = 1;
1134
1135         return ret;
1136 }
1137
1138 enum {
1139         MATCH_FULL,
1140         MATCH_FRONT_ONLY,
1141         MATCH_MIDDLE_ONLY,
1142         MATCH_END_ONLY,
1143 };
1144
1145 static void
1146 ftrace_match(unsigned char *buff, int len, int enable)
1147 {
1148         char str[KSYM_SYMBOL_LEN];
1149         char *search = NULL;
1150         struct ftrace_page *pg;
1151         struct dyn_ftrace *rec;
1152         int type = MATCH_FULL;
1153         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1154         unsigned i, match = 0, search_len = 0;
1155
1156         for (i = 0; i < len; i++) {
1157                 if (buff[i] == '*') {
1158                         if (!i) {
1159                                 search = buff + i + 1;
1160                                 type = MATCH_END_ONLY;
1161                                 search_len = len - (i + 1);
1162                         } else {
1163                                 if (type == MATCH_END_ONLY) {
1164                                         type = MATCH_MIDDLE_ONLY;
1165                                 } else {
1166                                         match = i;
1167                                         type = MATCH_FRONT_ONLY;
1168                                 }
1169                                 buff[i] = 0;
1170                                 break;
1171                         }
1172                 }
1173         }
1174
1175         /* should not be called from interrupt context */
1176         spin_lock(&ftrace_lock);
1177         if (enable)
1178                 ftrace_filtered = 1;
1179         pg = ftrace_pages_start;
1180         while (pg) {
1181                 for (i = 0; i < pg->index; i++) {
1182                         int matched = 0;
1183                         char *ptr;
1184
1185                         rec = &pg->records[i];
1186                         if (rec->flags & FTRACE_FL_FAILED)
1187                                 continue;
1188                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1189                         switch (type) {
1190                         case MATCH_FULL:
1191                                 if (strcmp(str, buff) == 0)
1192                                         matched = 1;
1193                                 break;
1194                         case MATCH_FRONT_ONLY:
1195                                 if (memcmp(str, buff, match) == 0)
1196                                         matched = 1;
1197                                 break;
1198                         case MATCH_MIDDLE_ONLY:
1199                                 if (strstr(str, search))
1200                                         matched = 1;
1201                                 break;
1202                         case MATCH_END_ONLY:
1203                                 ptr = strstr(str, search);
1204                                 if (ptr && (ptr[search_len] == 0))
1205                                         matched = 1;
1206                                 break;
1207                         }
1208                         if (matched)
1209                                 rec->flags |= flag;
1210                 }
1211                 pg = pg->next;
1212         }
1213         spin_unlock(&ftrace_lock);
1214 }
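/*
 * How the parser above classifies patterns, by example:
 *
 *     "sys_open"   MATCH_FULL          exact string compare
 *     "sys_*"      MATCH_FRONT_ONLY    memcmp() on the "sys_" prefix
 *     "*_read"     MATCH_END_ONLY      strstr() hit that ends the string
 *     "*lock*"     MATCH_MIDDLE_ONLY   any strstr() hit
 */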
1215
1216 static ssize_t
1217 ftrace_regex_write(struct file *file, const char __user *ubuf,
1218                    size_t cnt, loff_t *ppos, int enable)
1219 {
1220         struct ftrace_iterator *iter;
1221         char ch;
1222         size_t read = 0;
1223         ssize_t ret;
1224
1225         if (!cnt)  /* cnt is a size_t and can never be negative */
1226                 return 0;
1227
1228         mutex_lock(&ftrace_regex_lock);
1229
1230         if (file->f_mode & FMODE_READ) {
1231                 struct seq_file *m = file->private_data;
1232                 iter = m->private;
1233         } else
1234                 iter = file->private_data;
1235
1236         if (!*ppos) {
1237                 iter->flags &= ~FTRACE_ITER_CONT;
1238                 iter->buffer_idx = 0;
1239         }
1240
1241         ret = get_user(ch, ubuf++);
1242         if (ret)
1243                 goto out;
1244         read++;
1245         cnt--;
1246
1247         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1248                 /* skip white space */
1249                 while (cnt && isspace(ch)) {
1250                         ret = get_user(ch, ubuf++);
1251                         if (ret)
1252                                 goto out;
1253                         read++;
1254                         cnt--;
1255                 }
1256
1257                 if (isspace(ch)) {
1258                         file->f_pos += read;
1259                         ret = read;
1260                         goto out;
1261                 }
1262
1263                 iter->buffer_idx = 0;
1264         }
1265
1266         while (cnt && !isspace(ch)) {
1267                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1268                         iter->buffer[iter->buffer_idx++] = ch;
1269                 else {
1270                         ret = -EINVAL;
1271                         goto out;
1272                 }
1273                 ret = get_user(ch, ubuf++);
1274                 if (ret)
1275                         goto out;
1276                 read++;
1277                 cnt--;
1278         }
1279
1280         if (isspace(ch)) {
1281                 iter->filtered++;
1282                 iter->buffer[iter->buffer_idx] = 0;
1283                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1284                 iter->buffer_idx = 0;
1285         } else
1286                 iter->flags |= FTRACE_ITER_CONT;
1287
1288
1289         file->f_pos += read;
1290
1291         ret = read;
1292  out:
1293         mutex_unlock(&ftrace_regex_lock);
1294
1295         return ret;
1296 }
1297
1298 static ssize_t
1299 ftrace_filter_write(struct file *file, const char __user *ubuf,
1300                     size_t cnt, loff_t *ppos)
1301 {
1302         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1303 }
1304
1305 static ssize_t
1306 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1307                      size_t cnt, loff_t *ppos)
1308 {
1309         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1310 }
1311
1312 static void
1313 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1314 {
1315         if (unlikely(ftrace_disabled))
1316                 return;
1317
1318         mutex_lock(&ftrace_regex_lock);
1319         if (reset)
1320                 ftrace_filter_reset(enable);
1321         if (buf)
1322                 ftrace_match(buf, len, enable);
1323         mutex_unlock(&ftrace_regex_lock);
1324 }
1325
1326 /**
1327  * ftrace_set_filter - set a function to filter on in ftrace
1328  * @buf - the string that holds the function filter text.
1329  * @len - the length of the string.
1330  * @reset - non zero to reset all filters before applying this filter.
1331  *
1332  * Filters denote which functions should be enabled when tracing is enabled.
1333  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1334  */
1335 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1336 {
1337         ftrace_set_regex(buf, len, reset, 1);
1338 }
1339
1340 /**
1341  * ftrace_set_notrace - set a function to not trace in ftrace
1342  * @buf - the string that holds the function notrace text.
1343  * @len - the length of the string.
1344  * @reset - non zero to reset all filters before applying this filter.
1345  *
1346  * Notrace Filters denote which functions should not be enabled when tracing
1347  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1348  * for tracing.
1349  */
1350 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1351 {
1352         ftrace_set_regex(buf, len, reset, 0);
1353 }
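/*
 * A minimal sketch of an in-kernel caller (hypothetical tracer code).
 * The pattern must be writable because ftrace_match() writes a NUL
 * over the wildcard while parsing:
 */
static char sched_only[] = "sched_*";

static void my_tracer_setup(void)
{
        /* reset old filters, then trace only sched_* functions */
        ftrace_set_filter((unsigned char *)sched_only,
                          sizeof(sched_only) - 1, 1);
}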
1354
1355 static int
1356 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1357 {
1358         struct seq_file *m = (struct seq_file *)file->private_data;
1359         struct ftrace_iterator *iter;
1360
1361         mutex_lock(&ftrace_regex_lock);
1362         if (file->f_mode & FMODE_READ) {
1363                 iter = m->private;
1364
1365                 seq_release(inode, file);
1366         } else
1367                 iter = file->private_data;
1368
1369         if (iter->buffer_idx) {
1370                 iter->filtered++;
1371                 iter->buffer[iter->buffer_idx] = 0;
1372                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1373         }
1374
1375         mutex_lock(&ftrace_sysctl_lock);
1376         mutex_lock(&ftraced_lock);
1377         if (iter->filtered && ftraced_suspend && ftrace_enabled)
1378                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1379         mutex_unlock(&ftraced_lock);
1380         mutex_unlock(&ftrace_sysctl_lock);
1381
1382         kfree(iter);
1383         mutex_unlock(&ftrace_regex_lock);
1384         return 0;
1385 }
1386
1387 static int
1388 ftrace_filter_release(struct inode *inode, struct file *file)
1389 {
1390         return ftrace_regex_release(inode, file, 1);
1391 }
1392
1393 static int
1394 ftrace_notrace_release(struct inode *inode, struct file *file)
1395 {
1396         return ftrace_regex_release(inode, file, 0);
1397 }
1398
1399 static ssize_t
1400 ftraced_read(struct file *filp, char __user *ubuf,
1401                      size_t cnt, loff_t *ppos)
1402 {
1403         /* don't worry about races */
1404         char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1405         int r = strlen(buf);
1406
1407         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1408 }
1409
1410 static ssize_t
1411 ftraced_write(struct file *filp, const char __user *ubuf,
1412                       size_t cnt, loff_t *ppos)
1413 {
1414         char buf[64];
1415         long val;
1416         int ret;
1417
1418         if (cnt >= sizeof(buf))
1419                 return -EINVAL;
1420
1421         if (copy_from_user(&buf, ubuf, cnt))
1422                 return -EFAULT;
1423
1424         if (strncmp(buf, "enable", 6) == 0)
1425                 val = 1;
1426         else if (strncmp(buf, "disable", 7) == 0)
1427                 val = 0;
1428         else {
1429                 buf[cnt] = 0;
1430
1431                 ret = strict_strtoul(buf, 10, &val);
1432                 if (ret < 0)
1433                         return ret;
1434
1435                 val = !!val;
1436         }
1437
1438         if (val)
1439                 ftrace_enable_daemon();
1440         else
1441                 ftrace_disable_daemon();
1442
1443         filp->f_pos += cnt;
1444
1445         return cnt;
1446 }
1447
1448 static struct file_operations ftrace_avail_fops = {
1449         .open = ftrace_avail_open,
1450         .read = seq_read,
1451         .llseek = seq_lseek,
1452         .release = ftrace_avail_release,
1453 };
1454
1455 static struct file_operations ftrace_failures_fops = {
1456         .open = ftrace_failures_open,
1457         .read = seq_read,
1458         .llseek = seq_lseek,
1459         .release = ftrace_avail_release,
1460 };
1461
1462 static struct file_operations ftrace_filter_fops = {
1463         .open = ftrace_filter_open,
1464         .read = ftrace_regex_read,
1465         .write = ftrace_filter_write,
1466         .llseek = ftrace_regex_lseek,
1467         .release = ftrace_filter_release,
1468 };
1469
1470 static struct file_operations ftrace_notrace_fops = {
1471         .open = ftrace_notrace_open,
1472         .read = ftrace_regex_read,
1473         .write = ftrace_notrace_write,
1474         .llseek = ftrace_regex_lseek,
1475         .release = ftrace_notrace_release,
1476 };
1477
1478 static struct file_operations ftraced_fops = {
1479         .open = tracing_open_generic,
1480         .read = ftraced_read,
1481         .write = ftraced_write,
1482 };
1483
1484 /**
1485  * ftrace_force_update - force an update to all recording ftrace functions
1486  */
1487 int ftrace_force_update(void)
1488 {
1489         int ret = 0;
1490
1491         if (unlikely(ftrace_disabled))
1492                 return -ENODEV;
1493
1494         mutex_lock(&ftrace_sysctl_lock);
1495         mutex_lock(&ftraced_lock);
1496
1497         /*
1498          * If ftraced_trigger is not set, then there is nothing
1499          * to update.
1500          */
1501         if (ftraced_trigger && !ftrace_update_code())
1502                 ret = -EBUSY;
1503
1504         mutex_unlock(&ftraced_lock);
1505         mutex_unlock(&ftrace_sysctl_lock);
1506
1507         return ret;
1508 }
1509
1510 static void ftrace_force_shutdown(void)
1511 {
1512         struct task_struct *task;
1513         int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1514
1515         mutex_lock(&ftraced_lock);
1516         task = ftraced_task;
1517         ftraced_task = NULL;
1518         ftraced_suspend = -1;
1519         ftrace_run_update_code(command);
1520         mutex_unlock(&ftraced_lock);
1521
1522         if (task)
1523                 kthread_stop(task);
1524 }
1525
1526 static __init int ftrace_init_debugfs(void)
1527 {
1528         struct dentry *d_tracer;
1529         struct dentry *entry;
1530
1531         d_tracer = tracing_init_dentry();
1532
1533         entry = debugfs_create_file("available_filter_functions", 0444,
1534                                     d_tracer, NULL, &ftrace_avail_fops);
1535         if (!entry)
1536                 pr_warning("Could not create debugfs "
1537                            "'available_filter_functions' entry\n");
1538
1539         entry = debugfs_create_file("failures", 0444,
1540                                     d_tracer, NULL, &ftrace_failures_fops);
1541         if (!entry)
1542                 pr_warning("Could not create debugfs 'failures' entry\n");
1543
1544         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1545                                     NULL, &ftrace_filter_fops);
1546         if (!entry)
1547                 pr_warning("Could not create debugfs "
1548                            "'set_ftrace_filter' entry\n");
1549
1550         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1551                                     NULL, &ftrace_notrace_fops);
1552         if (!entry)
1553                 pr_warning("Could not create debugfs "
1554                            "'set_ftrace_notrace' entry\n");
1555
1556         entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1557                                     NULL, &ftraced_fops);
1558         if (!entry)
1559                 pr_warning("Could not create debugfs "
1560                            "'ftraced_enabled' entry\n");
1561         return 0;
1562 }
1563
1564 fs_initcall(ftrace_init_debugfs);
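/*
 * The files created above are the user interface for dynamic ftrace.
 * A user-space sketch (write_tracing_file() is a hypothetical helper;
 * the path assumes debugfs is mounted at /sys/kernel/debug):
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_tracing_file(const char *name, const char *val)
{
        char path[256];
        int fd, ret;

        snprintf(path, sizeof(path), "/sys/kernel/debug/tracing/%s", name);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
        close(fd);
        return ret;
}

/* e.g.: write_tracing_file("set_ftrace_filter", "sys_*");
 *       write_tracing_file("ftraced_enabled", "disable");
 */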
1565
1566 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
1567 static int ftrace_convert_nops(unsigned long *start,
1568                                unsigned long *end)
1569 {
1570         unsigned long *p;
1571         unsigned long addr;
1572         unsigned long flags;
1573
1574         p = start;
1575         while (p < end) {
1576                 addr = ftrace_call_adjust(*p++);
1577                 /* should not be called from interrupt context */
1578                 spin_lock(&ftrace_lock);
1579                 ftrace_record_ip(addr);
1580                 spin_unlock(&ftrace_lock);
1581                 ftrace_shutdown_replenish();
1582         }
1583
1584         /* p is ignored */
1585         local_irq_save(flags);
1586         __ftrace_update_code(p);
1587         local_irq_restore(flags);
1588
1589         return 0;
1590 }
1591
1592 void ftrace_init_module(unsigned long *start, unsigned long *end)
1593 {
1594         if (ftrace_disabled || start == end)
1595                 return;
1596         ftrace_convert_nops(start, end);
1597 }
1598
1599 extern unsigned long __start_mcount_loc[];
1600 extern unsigned long __stop_mcount_loc[];
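/*
 * These symbols come from the build: every mcount call site is
 * collected into an __mcount_loc section, and the linker script
 * brackets that section with the two markers.  A simplified sketch of
 * the vmlinux.lds.h idea:
 *
 *     __start_mcount_loc = .;
 *     *(__mcount_loc)
 *     __stop_mcount_loc = .;
 */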
1601
1602 void __init ftrace_init(void)
1603 {
1604         unsigned long count, addr, flags;
1605         int ret;
1606
1607         /* Keep the ftrace pointer to the stub */
1608         addr = (unsigned long)ftrace_stub;
1609
1610         local_irq_save(flags);
1611         ftrace_dyn_arch_init(&addr);
1612         local_irq_restore(flags);
1613
1614         /* ftrace_dyn_arch_init places the return code in addr */
1615         if (addr)
1616                 goto failed;
1617
1618         count = __stop_mcount_loc - __start_mcount_loc;
1619
1620         ret = ftrace_dyn_table_alloc(count);
1621         if (ret)
1622                 goto failed;
1623
1624         last_ftrace_enabled = ftrace_enabled = 1;
1625
1626         ret = ftrace_convert_nops(__start_mcount_loc,
1627                                   __stop_mcount_loc);
1628
1629         return;
1630  failed:
1631         ftrace_disabled = 1;
1632 }
1633 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
1634 static int ftraced(void *ignore)
1635 {
1636         unsigned long usecs;
1637
1638         while (!kthread_should_stop()) {
1639
1640                 set_current_state(TASK_INTERRUPTIBLE);
1641
1642                 /* check once a second */
1643                 schedule_timeout(HZ);
1644
1645                 if (unlikely(ftrace_disabled))
1646                         continue;
1647
1648                 mutex_lock(&ftrace_sysctl_lock);
1649                 mutex_lock(&ftraced_lock);
1650                 if (!ftraced_suspend && !ftraced_stop &&
1651                     ftrace_update_code()) {
1652                         usecs = nsecs_to_usecs(ftrace_update_time);
1653                         if (ftrace_update_tot_cnt > 100000) {
1654                                 ftrace_update_tot_cnt = 0;
1655                                 pr_info("hm, dftrace overflow: %lu change%s"
1656                                         " (%lu total) in %lu usec%s\n",
1657                                         ftrace_update_cnt,
1658                                         ftrace_update_cnt != 1 ? "s" : "",
1659                                         ftrace_update_tot_cnt,
1660                                         usecs, usecs != 1 ? "s" : "");
1661                                 ftrace_disabled = 1;
1662                                 WARN_ON_ONCE(1);
1663                         }
1664                 }
1665                 mutex_unlock(&ftraced_lock);
1666                 mutex_unlock(&ftrace_sysctl_lock);
1667
1668                 ftrace_shutdown_replenish();
1669         }
1670         __set_current_state(TASK_RUNNING);
1671         return 0;
1672 }
1673
1674 static int __init ftrace_dynamic_init(void)
1675 {
1676         struct task_struct *p;
1677         unsigned long addr;
1678         int ret;
1679
1680         addr = (unsigned long)ftrace_record_ip;
1681
1682         stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1683
1684         /* ftrace_dyn_arch_init places the return code in addr */
1685         if (addr) {
1686                 ret = (int)addr;
1687                 goto failed;
1688         }
1689
1690         ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1691         if (ret)
1692                 goto failed;
1693
1694         p = kthread_run(ftraced, NULL, "ftraced");
1695         if (IS_ERR(p)) {
1696                 ret = -1;
1697                 goto failed;
1698         }
1699
1700         last_ftrace_enabled = ftrace_enabled = 1;
1701         ftraced_task = p;
1702
1703         return 0;
1704
1705  failed:
1706         ftrace_disabled = 1;
1707         return ret;
1708 }
1709
1710 core_initcall(ftrace_dynamic_init);
1711 #endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1712
1713 #else
1714 # define ftrace_startup()               do { } while (0)
1715 # define ftrace_shutdown()              do { } while (0)
1716 # define ftrace_startup_sysctl()        do { } while (0)
1717 # define ftrace_shutdown_sysctl()       do { } while (0)
1718 # define ftrace_force_shutdown()        do { } while (0)
1719 #endif /* CONFIG_DYNAMIC_FTRACE */
1720
1721 /**
1722  * ftrace_kill_atomic - kill ftrace from critical sections
1723  *
1724  * This function should be used by panic code. It stops ftrace
1725  * but in a not so nice way. If you need to simply kill ftrace
1726  * from a non-atomic section, use ftrace_kill.
1727  */
1728 void ftrace_kill_atomic(void)
1729 {
1730         ftrace_disabled = 1;
1731         ftrace_enabled = 0;
1732 #ifdef CONFIG_DYNAMIC_FTRACE
1733         ftraced_suspend = -1;
1734 #endif
1735         clear_ftrace_function();
1736 }
1737
1738 /**
1739  * ftrace_kill - totally shut down ftrace
1740  *
1741  * This is a safety measure. If something was detected that seems
1742  * wrong, calling this function will keep ftrace from making
1743  * any more modifications or updates.
1744  * Use it when something has gone wrong.
1745  */
1746 void ftrace_kill(void)
1747 {
1748         mutex_lock(&ftrace_sysctl_lock);
1749         ftrace_disabled = 1;
1750         ftrace_enabled = 0;
1751
1752         clear_ftrace_function();
1753         mutex_unlock(&ftrace_sysctl_lock);
1754
1755         /* Try to totally disable ftrace */
1756         ftrace_force_shutdown();
1757 }
1758
1759 /**
1760  * register_ftrace_function - register a function for profiling
1761  * @ops - ops structure that holds the function for profiling.
1762  *
1763  * Register a function to be called by all functions in the
1764  * kernel.
1765  *
1766  * Note: @ops->func and all the functions it calls must be labeled
1767  *       with "notrace", otherwise it will go into a
1768  *       recursive loop.
1769  */
1770 int register_ftrace_function(struct ftrace_ops *ops)
1771 {
1772         int ret;
1773
1774         if (unlikely(ftrace_disabled))
1775                 return -1;
1776
1777         mutex_lock(&ftrace_sysctl_lock);
1778         ret = __register_ftrace_function(ops);
1779         ftrace_startup();
1780         mutex_unlock(&ftrace_sysctl_lock);
1781
1782         return ret;
1783 }
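/*
 * A minimal sketch of a caller (hypothetical code).  As noted above,
 * the callback and everything it calls must be notrace:
 */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
        /* invoked at (nearly) every traced function entry */
}

static struct ftrace_ops my_ops __read_mostly =
{
        .func = my_trace_func,
};

/* register_ftrace_function(&my_ops); ... unregister_ftrace_function(&my_ops); */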
1784
1785 /**
1786  * unregister_ftrace_function - unregister a function for profiling.
1787  * @ops - ops structure that holds the function to unregister
1788  *
1789  * Unregister a function that was added to be called by ftrace profiling.
1790  */
1791 int unregister_ftrace_function(struct ftrace_ops *ops)
1792 {
1793         int ret;
1794
1795         mutex_lock(&ftrace_sysctl_lock);
1796         ret = __unregister_ftrace_function(ops);
1797         ftrace_shutdown();
1798         mutex_unlock(&ftrace_sysctl_lock);
1799
1800         return ret;
1801 }
1802
1803 int
1804 ftrace_enable_sysctl(struct ctl_table *table, int write,
1805                      struct file *file, void __user *buffer, size_t *lenp,
1806                      loff_t *ppos)
1807 {
1808         int ret;
1809
1810         if (unlikely(ftrace_disabled))
1811                 return -ENODEV;
1812
1813         mutex_lock(&ftrace_sysctl_lock);
1814
1815         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1816
1817         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1818                 goto out;
1819
1820         last_ftrace_enabled = ftrace_enabled;
1821
1822         if (ftrace_enabled) {
1823
1824                 ftrace_startup_sysctl();
1825
1826                 /* we are starting ftrace again */
1827                 if (ftrace_list != &ftrace_list_end) {
1828                         if (ftrace_list->next == &ftrace_list_end)
1829                                 ftrace_trace_function = ftrace_list->func;
1830                         else
1831                                 ftrace_trace_function = ftrace_list_func;
1832                 }
1833
1834         } else {
1835                 /* stopping ftrace calls (just send to ftrace_stub) */
1836                 ftrace_trace_function = ftrace_stub;
1837
1838                 ftrace_shutdown_sysctl();
1839         }
1840
1841  out:
1842         mutex_unlock(&ftrace_sysctl_lock);
1843         return ret;
1844 }
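/*
 * This handler backs the kernel.ftrace_enabled sysctl (wired up in
 * kernel/sysctl.c), so function tracing can be toggled at run time,
 * e.g. with "sysctl kernel.ftrace_enabled=0".
 */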