/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

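/*
 * Walk every registered ftrace_ops and invoke its handler.  This is
 * the function installed as ftrace_trace_function whenever more than
 * one ops is registered.
 */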
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a brief lag before all CPUs stop
 * calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

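/* Bitmask commands handed to __ftrace_modify_code() under stop_machine() */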
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

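/*
 * Return nonzero if @ip must not be traced right now: its record is
 * frozen by a kprobe and is either failed, unconverted, currently
 * disabled, or excluded by the filters.
 */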
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* No interrupt should call this */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

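/*
 * Grab a record for @ip: reuse one from the free list if available,
 * otherwise take the next free slot in the current ftrace_page.
 */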
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

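/*
 * Installed as the mcount handler (see FTRACE_ENABLE_MCOUNT): hash
 * the call site and record it the first time it is hit, with per-cpu
 * recursion protection.
 */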
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

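/*
 * Walk every page of records and enable or disable the mcount call
 * site of each eligible record.  Runs with the machine stopped.
 */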
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

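/*
 * Patch the record's mcount call into a NOP.  Returns 1 on success;
 * on failure the record is marked FTRACE_FL_FAILED and 0 is returned.
 */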
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

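/*
 * ftrace_startup()/ftrace_shutdown() keep a use count in
 * ftraced_suspend: call sites are patched in for the first user and
 * patched out again when the last user goes away.
 */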
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

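/*
 * Convert the mcount call of every newly-recorded (unconverted) hash
 * entry into a NOP.  Called with the machine stopped, so no locking
 * is needed.
 */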
static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

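/*
 * seq_file iterator over every dyn_ftrace record, honoring the
 * FTRACE_ITER_* flags so only failed, filtered, or notrace entries
 * are shown as requested.
 */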
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

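/*
 * Match a buffer against every known function name.  One glob is
 * supported: "foo*" matches the front, "*foo" the end, "*foo*" any
 * substring; a bare name must match in full.
 */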
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}

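/*
 * Accept whitespace-separated patterns from user space.  A pattern
 * may arrive split across writes; FTRACE_ITER_CONT marks a token
 * that is still being assembled in iter->buffer.
 */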
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

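/*
 * Example (a sketch; the pattern is illustrative): trace only
 * functions whose names start with "sched", dropping any filter
 * set earlier:
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 */
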
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when
 * tracing is enabled. If @buf is NULL and reset is set, all functions
 * will be enabled for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
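/*
 * Record every mcount call site listed in [start, end) and then
 * convert them all to NOPs in one pass.
 */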
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something is detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more code modification or updates.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

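/*
 * Example (a sketch; the names are illustrative): a minimal handler.
 * The handler and everything it calls must be notrace to avoid
 * recursion.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		do_something_notrace(ip, parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
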
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

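/*
 * Handler for the ftrace_enabled sysctl: toggles tracing globally
 * and swaps ftrace_trace_function between the registered list and
 * ftrace_stub.
 */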
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}