kernel/trace/ftrace.c
ftrace: do not show freed records in available_filter_functions
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop
 * calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
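
/*
 * Note: the smp_wmb() in __register_ftrace_function() pairs with the
 * read_barrier_depends() calls in ftrace_list_func(). A CPU walking
 * the list concurrently must observe a valid ops->next before it can
 * observe the new list head, or it would fall off the end of the list.
 */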

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
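/*
 * Worked example (the sizes are illustrative assumptions, not
 * guarantees): with 4096-byte pages, a 16-byte struct ftrace_page
 * header and a 32-byte struct dyn_ftrace (ip, flags and an hlist_node
 * on 64-bit), ENTRIES_PER_PAGE comes to (4096 - 16) / 32 = 127
 * records per page.
 */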

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
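
/*
 * Freed records form a singly linked free list: the ip field of a
 * freed record is reused to hold the pointer to the next free record,
 * and FTRACE_FL_FREE marks the record as recycled. Sketch:
 *
 *	ftrace_free_records -> recA { ip = recB, FL_FREE }
 *	                       recB { ip = ...,  FL_FREE }
 *
 * ftrace_alloc_dyn_node() below pops from this list before carving
 * fresh records out of ftrace_pages.
 */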

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
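
/*
 * The filtered-and-enable branch above, summarized as a truth table
 * over the FILTER, NOTRACE and ENABLED bits of rec->flags:
 *
 *	FILTER NOTRACE ENABLED	action
 *	   0      0       0	nothing
 *	   0      0       1	disable
 *	   0      1       0	nothing
 *	   0      1       1	disable
 *	   1      0       0	enable
 *	   1      0       1	nothing (already enabled)
 *	   1      1       0	nothing (notrace wins)
 *	   1      1       1	disable
 */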

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUs are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e., patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}
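
/*
 * One '*' is honoured at the start and/or end of an expression.
 * Illustrative patterns (the function names are just examples):
 *
 *	"schedule"	MATCH_FULL		exact name only
 *	"sys_*"		MATCH_FRONT_ONLY	names starting with "sys_"
 *	"*_lock"	MATCH_END_ONLY		names ending in "_lock"
 *	"*spin*"	MATCH_MIDDLE_ONLY	names containing "spin"
 */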

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
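
/*
 * Example (the traced function is an arbitrary illustration):
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * passes reset non-zero, so any previously set filters are cleared
 * before "schedule" is marked with FTRACE_FL_FILTER.
 */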

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}
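
/*
 * From userspace the daemon is toggled through debugfs; the parser
 * above accepts the literal strings "enable"/"disable" as well as a
 * plain number (non-zero enables). Assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *	echo 1       > /sys/kernel/debug/tracing/ftraced_enabled
 *	cat /sys/kernel/debug/tracing/ftraced_enabled
 */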

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
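
/*
 * __start_mcount_loc and __stop_mcount_loc bound the __mcount_loc
 * section that the build fills with the address of every mcount call
 * site when CONFIG_FTRACE_MCOUNT_RECORD is set, so ftrace_init() can
 * walk a ready-made table at boot instead of discovering call sites
 * at run time.
 */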

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from making any
 * further modifications or updates. It is used when something has
 * gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
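
/*
 * Minimal registration sketch (illustrative only; the callback and
 * counter names are hypothetical). As the kernel-doc above warns, the
 * callback and everything it calls must be notrace:
 *
 *	static atomic_t my_hits;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */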

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}