/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * the CPUs stop calling the previous trace function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
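
/*
 * Illustrative sketch (not part of this file): a minimal tracer hooks
 * into the list above through the public register_ftrace_function()
 * wrapper, which ends up calling __register_ftrace_function(). The
 * names my_trace_func and my_ops are made up for the example.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... called for every traced function; ip is the traced
 *		    function, parent_ip its call site ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */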

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, i.e. done by the callers themselves at run
 * time rather than recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
                        spin_unlock_irqrestore(&ftrace_hash_lock, flags)
static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
static inline void ftrace_release_hash(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code while
 * parsing the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
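
/*
 * Worked example (illustrative only; the real sizes depend on the
 * architecture and config): with PAGE_SIZE = 4096, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace (ip,
 * flags, and an hlist_node of two pointers on 64-bit),
 * ENTRIES_PER_PAGE works out to (4096 - 16) / 32 = 127 records.
 */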

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
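
/*
 * Note on the free list above: freed records are chained through
 * their own ip field, so the list costs no extra memory. After
 * freeing records A and then B:
 *
 *	ftrace_free_records -> B (ip = A) -> A (ip = 0)
 *
 * ftrace_alloc_dyn_node() below pops the head and restores ip to a
 * real text address; FTRACE_FL_FREE marks list membership and is
 * sanity-checked on reuse.
 */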

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);

        ftrace_release_hash(s, e);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        ftrace_hash_lock(flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        ftrace_hash_unlock(flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
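
/*
 * The per-CPU counter above is the usual recursion guard: anything
 * called from ftrace_record_ip() (the hash walk, the allocator) may
 * itself be compiled with mcount calls, so re-entry on the same CPU
 * must bail out. A minimal sketch of the idiom:
 *
 *	per_cpu(depth, cpu)++;
 *	if (per_cpu(depth, cpu) != 1)
 *		goto out;	(recursed: do nothing)
 *	... do the real work ...
 *    out:
 *	per_cpu(depth, cpu)--;
 */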

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
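
/*
 * The filtered-and-enable cases above, as a table over the three
 * flags FILTER, NOTRACE and ENABLED:
 *
 *	FILTER ENABLED		-> leave alone (already traced)
 *	FILTER NOTRACE		-> leave alone (notrace wins)
 *	NOTRACE			-> leave alone
 *	(none)			-> leave alone
 *	FILTER			-> patch the call in  (enable)
 *	ENABLED			-> patch the call out (disable)
 *	NOTRACE ENABLED		-> patch the call out (disable)
 *	FILTER NOTRACE ENABLED	-> patch the call out (disable)
 */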

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int ret;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, mcount_addr);

        ret = ftrace_modify_code(ip, call, nop);
        if (ret) {
                switch (ret) {
                case -EFAULT:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on modifying ");
                        print_ip_sym(ip);
                        break;
                case -EINVAL:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace failed to modify ");
                        print_ip_sym(ip);
                        print_ip_ins(" expected: ", call);
                        print_ip_ins(" actual: ", (unsigned char *)ip);
                        print_ip_ins(" replace: ", nop);
                        printk(KERN_CONT "\n");
                        break;
                case -EPERM:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on writing ");
                        print_ip_sym(ip);
                        break;
                default:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on unknown error ");
                        print_ip_sym(ip);
                }

                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
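
/*
 * ftraced_suspend acts as a reference count on ftrace_startup() and
 * ftrace_shutdown(): only the 0 -> 1 and 1 -> 0 transitions patch
 * code. With two tracers, for example:
 *
 *	ftrace_startup();	0 -> 1: calls patched in
 *	ftrace_startup();	1 -> 2: no code change
 *	ftrace_shutdown();	2 -> 1: no code change
 *	ftrace_shutdown();	1 -> 0: calls patched out
 */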

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /* Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run. */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e, patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine(__ftrace_update_code, NULL, NULL);

        return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld hash entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
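
/*
 * How ftrace_match() below maps the '*' positions in a pattern to
 * the match types above, by example:
 *
 *	"schedule"	MATCH_FULL		exact symbol name
 *	"sched*"	MATCH_FRONT_ONLY	prefix match
 *	"*timer"	MATCH_END_ONLY		suffix match
 *	"*lock*"	MATCH_MIDDLE_ONLY	substring match
 */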

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        /* cnt is a size_t and therefore can never be negative */
        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}
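
/*
 * Illustrative in-kernel usage of the two helpers above (the buffers
 * are examples, not real callers; they must be writable because
 * ftrace_match() cuts '*' wildcards in place):
 *
 *	static char filter_buf[] = "schedule";
 *	static char notrace_buf[] = "spin_*";
 *
 *	ftrace_set_filter(filter_buf, strlen(filter_buf), 1);
 *	ftrace_set_notrace(notrace_buf, strlen(notrace_buf), 0);
 */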

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}
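
/*
 * Example of driving the files above from userspace (illustrative;
 * assumes debugfs is mounted at /sys/kernel/debug and that
 * tracing_init_dentry() returned the usual "tracing" directory):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*'  > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo disable   > /sys/kernel/debug/tracing/ftraced_enabled
 */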
1605
1606 fs_initcall(ftrace_init_debugfs);
1607
1608 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
1609 static int ftrace_convert_nops(unsigned long *start,
1610                                unsigned long *end)
1611 {
1612         unsigned long *p;
1613         unsigned long addr;
1614         unsigned long flags;
1615
1616         p = start;
1617         while (p < end) {
1618                 addr = ftrace_call_adjust(*p++);
1619                 /* should not be called from interrupt context */
1620                 spin_lock(&ftrace_lock);
1621                 ftrace_record_ip(addr);
1622                 spin_unlock(&ftrace_lock);
1623                 ftrace_shutdown_replenish();
1624         }
1625
1626         /* p is ignored */
1627         local_irq_save(flags);
1628         __ftrace_update_code(p);
1629         local_irq_restore(flags);
1630
1631         return 0;
1632 }
1633
1634 void ftrace_init_module(unsigned long *start, unsigned long *end)
1635 {
1636         if (ftrace_disabled || start == end)
1637                 return;
1638         ftrace_convert_nops(start, end);
1639 }
1640
1641 extern unsigned long __start_mcount_loc[];
1642 extern unsigned long __stop_mcount_loc[];
1643
1644 void __init ftrace_init(void)
1645 {
1646         unsigned long count, addr, flags;
1647         int ret;
1648
1649         /* Keep the ftrace pointer to the stub */
1650         addr = (unsigned long)ftrace_stub;
1651
1652         local_irq_save(flags);
1653         ftrace_dyn_arch_init(&addr);
1654         local_irq_restore(flags);
1655
1656         /* ftrace_dyn_arch_init places the return code in addr */
1657         if (addr)
1658                 goto failed;
1659
1660         count = __stop_mcount_loc - __start_mcount_loc;
1661
1662         ret = ftrace_dyn_table_alloc(count);
1663         if (ret)
1664                 goto failed;
1665
1666         last_ftrace_enabled = ftrace_enabled = 1;
1667
1668         ret = ftrace_convert_nops(__start_mcount_loc,
1669                                   __stop_mcount_loc);
1670
1671         return;
1672  failed:
1673         ftrace_disabled = 1;
1674 }
1675 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
1676
1677 static void ftrace_release_hash(unsigned long start, unsigned long end)
1678 {
1679         struct dyn_ftrace *rec;
1680         struct hlist_node *t, *n;
1681         struct hlist_head *head, temp_list;
1682         unsigned long flags;
1683         int i, cpu;
1684
1685         preempt_disable_notrace();
1686
1687         /* disable incase we call something that calls mcount */
1688         cpu = raw_smp_processor_id();
1689         per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
1690
1691         ftrace_hash_lock(flags);
1692
1693         for (i = 0; i < FTRACE_HASHSIZE; i++) {
1694                 INIT_HLIST_HEAD(&temp_list);
1695                 head = &ftrace_hash[i];
1696
1697                 /* all CPUS are stopped, we are safe to modify code */
1698                 hlist_for_each_entry_safe(rec, t, n, head, node) {
1699                         if (rec->flags & FTRACE_FL_FREE)
1700                                 continue;
1701
1702                         if ((rec->ip >= start) && (rec->ip < end))
1703                                 ftrace_free_rec(rec);
1704                 }
1705         }
1706
1707         ftrace_hash_unlock(flags);
1708
1709         per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
1710         preempt_enable_notrace();
1711
1712 }
1713
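/*
 * The ftraced daemon: wake once a second and, unless tracing is
 * suspended or stopped, patch in any newly recorded call sites via
 * ftrace_update_code(). An implausibly large update count is treated
 * as an anomaly and shuts ftrace down.
 */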
1714 static int ftraced(void *ignore)
1715 {
1716         unsigned long usecs;
1717
1718         while (!kthread_should_stop()) {
1719
1720                 set_current_state(TASK_INTERRUPTIBLE);
1721
1722                 /* check once a second */
1723                 schedule_timeout(HZ);
1724
1725                 if (unlikely(ftrace_disabled))
1726                         continue;
1727
1728                 mutex_lock(&ftrace_sysctl_lock);
1729                 mutex_lock(&ftraced_lock);
1730                 if (!ftraced_suspend && !ftraced_stop &&
1731                     ftrace_update_code()) {
1732                         usecs = nsecs_to_usecs(ftrace_update_time);
1733                         if (ftrace_update_tot_cnt > 100000) {
1734                                 ftrace_update_tot_cnt = 0;
1735                                 pr_info("hm, ftrace overflow: %lu change%s"
1736                                         " (%lu total) in %lu usec%s\n",
1737                                         ftrace_update_cnt,
1738                                         ftrace_update_cnt != 1 ? "s" : "",
1739                                         ftrace_update_tot_cnt,
1740                                         usecs, usecs != 1 ? "s" : "");
1741                                 ftrace_disabled = 1;
1742                                 WARN_ON_ONCE(1);
1743                         }
1744                 }
1745                 mutex_unlock(&ftraced_lock);
1746                 mutex_unlock(&ftrace_sysctl_lock);
1747
1748                 ftrace_shutdown_replenish();
1749         }
1750         __set_current_state(TASK_RUNNING);
1751         return 0;
1752 }
1753
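/*
 * Without CONFIG_FTRACE_MCOUNT_RECORD there is no build-time list of
 * call sites, so they are discovered at run time instead: point the
 * arch init at ftrace_record_ip() under stop_machine(), allocate the
 * record table, and start the ftraced daemon to do the patching.
 */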
1754 static int __init ftrace_dynamic_init(void)
1755 {
1756         struct task_struct *p;
1757         unsigned long addr;
1758         int ret;
1759
1760         addr = (unsigned long)ftrace_record_ip;
1761
1762         stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1763
1764         /* ftrace_dyn_arch_init places the return code in addr */
1765         if (addr) {
1766                 ret = (int)addr;
1767                 goto failed;
1768         }
1769
1770         ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1771         if (ret)
1772                 goto failed;
1773
1774         p = kthread_run(ftraced, NULL, "ftraced");
1775         if (IS_ERR(p)) {
1776                 ret = -1;
1777                 goto failed;
1778         }
1779
1780         last_ftrace_enabled = ftrace_enabled = 1;
1781         ftraced_task = p;
1782
1783         return 0;
1784
1785  failed:
1786         ftrace_disabled = 1;
1787         return ret;
1788 }
1789
1790 core_initcall(ftrace_dynamic_init);
1791 #endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1792
1793 #else
1794 # define ftrace_startup()               do { } while (0)
1795 # define ftrace_shutdown()              do { } while (0)
1796 # define ftrace_startup_sysctl()        do { } while (0)
1797 # define ftrace_shutdown_sysctl()       do { } while (0)
1798 # define ftrace_force_shutdown()        do { } while (0)
1799 #endif /* CONFIG_DYNAMIC_FTRACE */
1800
1801 /**
1802  * ftrace_kill_atomic - kill ftrace from critical sections
1803  *
1804  * This function should be used by panic code. It stops ftrace
1805  * but in a not-so-nice way. If you simply need to kill ftrace
1806  * from a non-atomic section, use ftrace_kill.
1807  */
1808 void ftrace_kill_atomic(void)
1809 {
1810         ftrace_disabled = 1;
1811         ftrace_enabled = 0;
1812 #ifdef CONFIG_DYNAMIC_FTRACE
1813         ftraced_suspend = -1;
1814 #endif
1815         clear_ftrace_function();
1816 }
1817
1818 /**
1819  * ftrace_kill - totally shutdown ftrace
1820  *
1821  * This is a safety measure. If something was detected that seems
1822  * wrong, calling this function will keep ftrace from making
1823  * any more modifications or updates.
1824  * Use it when something has gone wrong.
1825  */
1826 void ftrace_kill(void)
1827 {
1828         mutex_lock(&ftrace_sysctl_lock);
1829         ftrace_disabled = 1;
1830         ftrace_enabled = 0;
1831
1832         clear_ftrace_function();
1833         mutex_unlock(&ftrace_sysctl_lock);
1834
1835         /* Try to totally disable ftrace */
1836         ftrace_force_shutdown();
1837 }
1838
1839 /**
1840  * register_ftrace_function - register a function for profiling
1841  * @ops: ops structure that holds the function for profiling.
1842  *
1843  * Register a function to be called by all functions in the
1844  * kernel.
1845  *
1846  * Note: @ops->func and all the functions it calls must be labeled
1847  *       with "notrace", otherwise it will go into a
1848  *       recursive loop.
1849  */
1850 int register_ftrace_function(struct ftrace_ops *ops)
1851 {
1852         int ret;
1853
1854         if (unlikely(ftrace_disabled))
1855                 return -1;
1856
1857         mutex_lock(&ftrace_sysctl_lock);
1858         ret = __register_ftrace_function(ops);
1859         ftrace_startup();
1860         mutex_unlock(&ftrace_sysctl_lock);
1861
1862         return ret;
1863 }
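
/*
 * A minimal sketch of a caller, assuming a hypothetical tracer
 * (my_trace_func / my_trace_ops are illustrative names, not part
 * of this file). As the comment above warns, the callback and
 * everything it calls must be notrace, or mcount recurses into it.
 */
#if 0   /* illustrative only */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
        /* ip is the traced function's address, parent_ip its caller's */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
        .func = my_trace_func,
};

/* at init: register_ftrace_function(&my_trace_ops);   */
/* at exit: unregister_ftrace_function(&my_trace_ops); */
#endif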
1864
1865 /**
1866  * unregister_ftrace_function - unregister a function for profiling.
1867  * @ops: ops structure that holds the function to unregister
1868  *
1869  * Unregister a function that was added to be called by ftrace profiling.
1870  */
1871 int unregister_ftrace_function(struct ftrace_ops *ops)
1872 {
1873         int ret;
1874
1875         mutex_lock(&ftrace_sysctl_lock);
1876         ret = __unregister_ftrace_function(ops);
1877         ftrace_shutdown();
1878         mutex_unlock(&ftrace_sysctl_lock);
1879
1880         return ret;
1881 }
1882
1883 int
1884 ftrace_enable_sysctl(struct ctl_table *table, int write,
1885                      struct file *file, void __user *buffer, size_t *lenp,
1886                      loff_t *ppos)
1887 {
1888         int ret;
1889
1890         if (unlikely(ftrace_disabled))
1891                 return -ENODEV;
1892
1893         mutex_lock(&ftrace_sysctl_lock);
1894
1895         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1896
1897         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1898                 goto out;
1899
1900         last_ftrace_enabled = ftrace_enabled;
1901
1902         if (ftrace_enabled) {
1903
1904                 ftrace_startup_sysctl();
1905
1906                 /* we are starting ftrace again */
1907                 if (ftrace_list != &ftrace_list_end) {
1908                         if (ftrace_list->next == &ftrace_list_end)
1909                                 ftrace_trace_function = ftrace_list->func;
1910                         else
1911                                 ftrace_trace_function = ftrace_list_func;
1912                 }
1913
1914         } else {
1915                 /* stopping ftrace calls (just send to ftrace_stub) */
1916                 ftrace_trace_function = ftrace_stub;
1917
1918                 ftrace_shutdown_sysctl();
1919         }
1920
1921  out:
1922         mutex_unlock(&ftrace_sysctl_lock);
1923         return ret;
1924 }
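
/*
 * The handler above is wired to /proc/sys/kernel/ftrace_enabled by a
 * ctl_table entry that lives elsewhere (kernel/sysctl.c in trees of
 * this vintage). A hedged sketch of what such an entry looks like;
 * the exact fields are an assumption, not copied from this tree:
 */
#if 0   /* illustrative only */
static struct ctl_table ftrace_sysctl_entry[] = {
        {
                .procname       = "ftrace_enabled",
                .data           = &ftrace_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &ftrace_enable_sysctl,
        },
        { }     /* sentinel */
};
#endif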