1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
30
31 #include <asm/ftrace.h>
32
33 #include "trace.h"
34
35 #define FTRACE_WARN_ON(cond)                    \
36         do {                                    \
37                 if (WARN_ON(cond))              \
38                         ftrace_kill();          \
39         } while (0)
40
41 #define FTRACE_WARN_ON_ONCE(cond)               \
42         do {                                    \
43                 if (WARN_ON_ONCE(cond))         \
44                         ftrace_kill();          \
45         } while (0)
46
47 /* ftrace_enabled is a method to turn ftrace on or off */
48 int ftrace_enabled __read_mostly;
49 static int last_ftrace_enabled;
50
51 /*
52  * ftrace_disabled is set when an anomaly is discovered.
53  * ftrace_disabled is much stronger than ftrace_enabled.
54  */
55 static int ftrace_disabled __read_mostly;
56
57 static DEFINE_SPINLOCK(ftrace_lock);
58 static DEFINE_MUTEX(ftrace_sysctl_lock);
59
60 static struct ftrace_ops ftrace_list_end __read_mostly =
61 {
62         .func = ftrace_stub,
63 };
64
65 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
66 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
67
68 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
69 {
70         struct ftrace_ops *op = ftrace_list;
71
72         /* in case someone actually ports this to alpha! */
73         read_barrier_depends();
74
75         while (op != &ftrace_list_end) {
76                 /* silly alpha */
77                 read_barrier_depends();
78                 op->func(ip, parent_ip);
79                 op = op->next;
80         }
81 }
82
83 /**
84  * clear_ftrace_function - reset the ftrace function
85  *
86  * This NULLs the ftrace function and in essence stops
87  * tracing.  There may be a short lag before all CPUs stop
88  * calling the old function.
88  */
89 void clear_ftrace_function(void)
90 {
91         ftrace_trace_function = ftrace_stub;
92 }
93
94 static int __register_ftrace_function(struct ftrace_ops *ops)
95 {
96         /* should not be called from interrupt context */
97         spin_lock(&ftrace_lock);
98
99         ops->next = ftrace_list;
100         /*
101          * We are entering ops into the ftrace_list but another
102          * CPU might be walking that list. We need to make sure
103          * the ops->next pointer is valid before another CPU sees
104          * the ops pointer included into the ftrace_list.
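         * The smp_wmb() below pairs with the read_barrier_depends()
         * calls in ftrace_list_func().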
105          */
106         smp_wmb();
107         ftrace_list = ops;
108
109         if (ftrace_enabled) {
110                 /*
111                  * For one func, simply call it directly.
112                  * For more than one func, call the chain.
113                  */
114                 if (ops->next == &ftrace_list_end)
115                         ftrace_trace_function = ops->func;
116                 else
117                         ftrace_trace_function = ftrace_list_func;
118         }
119
120         spin_unlock(&ftrace_lock);
121
122         return 0;
123 }
124
125 static int __unregister_ftrace_function(struct ftrace_ops *ops)
126 {
127         struct ftrace_ops **p;
128         int ret = 0;
129
130         /* should not be called from interrupt context */
131         spin_lock(&ftrace_lock);
132
133         /*
134          * If we are removing the last function, then simply point
135          * to the ftrace_stub.
136          */
137         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
138                 ftrace_trace_function = ftrace_stub;
139                 ftrace_list = &ftrace_list_end;
140                 goto out;
141         }
142
143         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
144                 if (*p == ops)
145                         break;
146
147         if (*p != ops) {
148                 ret = -1;
149                 goto out;
150         }
151
152         *p = (*p)->next;
153
154         if (ftrace_enabled) {
155                 /* If we only have one func left, then call that directly */
156                 if (ftrace_list == &ftrace_list_end ||
157                     ftrace_list->next == &ftrace_list_end)
158                         ftrace_trace_function = ftrace_list->func;
159         }
160
161  out:
162         spin_unlock(&ftrace_lock);
163
164         return ret;
165 }
166
167 #ifdef CONFIG_DYNAMIC_FTRACE
168 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
169 # error Dynamic ftrace depends on MCOUNT_RECORD
170 #endif
171
172 /*
173  * Since MCOUNT_ADDR may point to mcount itself, we do not want
174  * to be confused by a reference to it in the code while we
175  * parse the objcopy output of the text section. Use a variable
176  * for it instead.
177  */
178 static unsigned long mcount_addr = MCOUNT_ADDR;
179
180 enum {
181         FTRACE_ENABLE_CALLS             = (1 << 0),
182         FTRACE_DISABLE_CALLS            = (1 << 1),
183         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
184         FTRACE_ENABLE_MCOUNT            = (1 << 3),
185         FTRACE_DISABLE_MCOUNT           = (1 << 4),
186 };
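
/*
 * These are bit flags: a single stop_machine() pass can apply several
 * commands at once, e.g. FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC,
 * as built up in ftrace_startup() below.
 */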
187
188 static int ftrace_filtered;
189 static int tracing_on;
190 static int frozen_record_count;
191
192 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
193
194 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
195
196 static DEFINE_MUTEX(ftrace_regex_lock);
197
198 struct ftrace_page {
199         struct ftrace_page      *next;
200         unsigned long           index;
201         struct dyn_ftrace       records[];
202 };
203
204 #define ENTRIES_PER_PAGE \
205   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
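
/*
 * A worked example (the sizes are illustrative; the real ones depend
 * on the architecture and config): with 4096-byte pages, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace, one page
 * holds (4096 - 16) / 32 = 127 records.
 */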
206
207 /* estimate from running different kernels */
208 #define NR_TO_INIT              10000
209
210 static struct ftrace_page       *ftrace_pages_start;
211 static struct ftrace_page       *ftrace_pages;
212
213 static int ftrace_record_suspend;
214
215 static struct dyn_ftrace *ftrace_free_records;
216
218 #ifdef CONFIG_KPROBES
219 static inline void freeze_record(struct dyn_ftrace *rec)
220 {
221         if (!(rec->flags & FTRACE_FL_FROZEN)) {
222                 rec->flags |= FTRACE_FL_FROZEN;
223                 frozen_record_count++;
224         }
225 }
226
227 static inline void unfreeze_record(struct dyn_ftrace *rec)
228 {
229         if (rec->flags & FTRACE_FL_FROZEN) {
230                 rec->flags &= ~FTRACE_FL_FROZEN;
231                 frozen_record_count--;
232         }
233 }
234
235 static inline int record_frozen(struct dyn_ftrace *rec)
236 {
237         return rec->flags & FTRACE_FL_FROZEN;
238 }
239 #else
240 # define freeze_record(rec)                     ({ 0; })
241 # define unfreeze_record(rec)                   ({ 0; })
242 # define record_frozen(rec)                     ({ 0; })
243 #endif /* CONFIG_KPROBES */
244
245 int skip_trace(unsigned long ip)
246 {
247         unsigned long fl;
248         struct dyn_ftrace *rec;
249         struct hlist_node *t;
250         struct hlist_head *head;
251
252         if (frozen_record_count == 0)
253                 return 0;
254
255         head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
256         hlist_for_each_entry_rcu(rec, t, head, node) {
257                 if (rec->ip == ip) {
258                         if (record_frozen(rec)) {
259                                 if (rec->flags & FTRACE_FL_FAILED)
260                                         return 1;
261
262                                 if (!(rec->flags & FTRACE_FL_CONVERTED))
263                                         return 1;
264
265                                 if (!tracing_on || !ftrace_enabled)
266                                         return 1;
267
268                                 if (ftrace_filtered) {
269                                         fl = rec->flags & (FTRACE_FL_FILTER |
270                                                            FTRACE_FL_NOTRACE);
271                                         if (!fl || (fl & FTRACE_FL_NOTRACE))
272                                                 return 1;
273                                 }
274                         }
275                         break;
276                 }
277         }
278
279         return 0;
280 }
281
282 static inline int
283 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
284 {
285         struct dyn_ftrace *p;
286         struct hlist_node *t;
287         int found = 0;
288
289         hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
290                 if (p->ip == ip) {
291                         found = 1;
292                         break;
293                 }
294         }
295
296         return found;
297 }
298
299 static inline void
300 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
301 {
302         hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
303 }
304
305 /* called from kstop_machine */
306 static inline void ftrace_del_hash(struct dyn_ftrace *node)
307 {
308         hlist_del(&node->node);
309 }
310
311 static void ftrace_free_rec(struct dyn_ftrace *rec)
312 {
313         rec->ip = (unsigned long)ftrace_free_records;
314         ftrace_free_records = rec;
315         rec->flags |= FTRACE_FL_FREE;
316 }
317
318 void ftrace_release(void *start, unsigned long size)
319 {
320         struct dyn_ftrace *rec;
321         struct ftrace_page *pg;
322         unsigned long s = (unsigned long)start;
323         unsigned long e = s + size;
324         int i;
325
326         if (ftrace_disabled || !start)
327                 return;
328
329         /* should not be called from interrupt context */
330         spin_lock(&ftrace_lock);
331
332         for (pg = ftrace_pages_start; pg; pg = pg->next) {
333                 for (i = 0; i < pg->index; i++) {
334                         rec = &pg->records[i];
335
336                         if ((rec->ip >= s) && (rec->ip < e))
337                                 ftrace_free_rec(rec);
338                 }
339         }
340         spin_unlock(&ftrace_lock);
341
342         ftrace_release_hash(s, e);
343 }
344
345 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
346 {
347         struct dyn_ftrace *rec;
348
349         /* First check for freed records */
350         if (ftrace_free_records) {
351                 rec = ftrace_free_records;
352
353                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
354                         FTRACE_WARN_ON_ONCE(1);
355                         ftrace_free_records = NULL;
356                         return NULL;
357                 }
358
359                 ftrace_free_records = (void *)rec->ip;
360                 memset(rec, 0, sizeof(*rec));
361                 return rec;
362         }
363
364         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
365                 if (!ftrace_pages->next)
366                         return NULL;
367                 ftrace_pages = ftrace_pages->next;
368         }
369
370         return &ftrace_pages->records[ftrace_pages->index++];
371 }
372
373 static void
374 ftrace_record_ip(unsigned long ip)
375 {
376         struct dyn_ftrace *node;
377         unsigned long key;
378         int resched;
379         int cpu;
380
381         if (!ftrace_enabled || ftrace_disabled)
382                 return;
383
384         resched = need_resched();
385         preempt_disable_notrace();
386
387         /*
388          * We simply need to protect against recursion.
389          * Use the raw version of smp_processor_id and not
390          * __get_cpu_var which can call debug hooks that can
391          * cause a recursive crash here.
392          */
393         cpu = raw_smp_processor_id();
394         per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
395         if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
396                 goto out;
397
398         if (unlikely(ftrace_record_suspend))
399                 goto out;
400
401         key = hash_long(ip, FTRACE_HASHBITS);
402
403         FTRACE_WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
404
405         if (ftrace_ip_in_hash(ip, key))
406                 goto out;
407
412         node = ftrace_alloc_dyn_node(ip);
413         if (!node)
414                 goto out;
415
416         node->ip = ip;
417
418         ftrace_add_hash(node, key);
419
420  out:
421         per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
422
423         /* prevent recursion with scheduler */
424         if (resched)
425                 preempt_enable_no_resched_notrace();
426         else
427                 preempt_enable_notrace();
428 }
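
/*
 * Note on the resched dance above: if NEED_RESCHED was already set
 * when preemption was disabled, re-enabling with the no_resched
 * variant avoids calling into schedule() from the tracer itself,
 * which could recurse right back into ftrace_record_ip().
 */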
429
430 #define FTRACE_ADDR ((long)(ftrace_caller))
431
432 static int
433 __ftrace_replace_code(struct dyn_ftrace *rec,
434                       unsigned char *old, unsigned char *new, int enable)
435 {
436         unsigned long ip, fl;
437
438         ip = rec->ip;
439
440         if (ftrace_filtered && enable) {
441                 /*
442                  * If filtering is on:
443                  *
444                  * If this record is set to be filtered and
445                  * is enabled then do nothing.
446                  *
447                  * If this record is set to be filtered and
448                  * it is not enabled, enable it.
449                  *
450                  * If this record is not set to be filtered
451                  * and it is not enabled do nothing.
452                  *
453                  * If this record is set not to trace then
454                  * do nothing.
455                  *
456                  * If this record is set not to trace and
457                  * it is enabled then disable it.
458                  *
459                  * If this record is not set to be filtered and
460                  * it is enabled, disable it.
461                  */
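
                /*
                 * Equivalently, with F=FILTER, N=NOTRACE, E=ENABLED,
                 * the checks below reduce to this table:
                 *
                 *   (none), N, F|E, F|N  ->  do nothing
                 *   F                    ->  enable
                 *   E, N|E, F|N|E        ->  disable
                 */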
462
463                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
464                                    FTRACE_FL_ENABLED);
465
466                 if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
467                     (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
468                     !fl || (fl == FTRACE_FL_NOTRACE))
469                         return 0;
470
471                 /*
472                  * If it is enabled disable it,
473                  * otherwise enable it!
474                  */
475                 if (fl & FTRACE_FL_ENABLED) {
476                         /* swap new and old */
477                         new = old;
478                         old = ftrace_call_replace(ip, FTRACE_ADDR);
479                         rec->flags &= ~FTRACE_FL_ENABLED;
480                 } else {
481                         new = ftrace_call_replace(ip, FTRACE_ADDR);
482                         rec->flags |= FTRACE_FL_ENABLED;
483                 }
484         } else {
485
486                 if (enable) {
487                         /*
488                          * If this record is set not to trace and is
489                          * not enabled, do nothing.
490                          */
491                         fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
492                         if (fl == FTRACE_FL_NOTRACE)
493                                 return 0;
494
495                         new = ftrace_call_replace(ip, FTRACE_ADDR);
496                 } else
497                         old = ftrace_call_replace(ip, FTRACE_ADDR);
498
499                 if (enable) {
500                         if (rec->flags & FTRACE_FL_ENABLED)
501                                 return 0;
502                         rec->flags |= FTRACE_FL_ENABLED;
503                 } else {
504                         if (!(rec->flags & FTRACE_FL_ENABLED))
505                                 return 0;
506                         rec->flags &= ~FTRACE_FL_ENABLED;
507                 }
508         }
509
510         return ftrace_modify_code(ip, old, new);
511 }
512
513 static void ftrace_replace_code(int enable)
514 {
515         int i, failed;
516         unsigned char *new = NULL, *old = NULL;
517         struct dyn_ftrace *rec;
518         struct ftrace_page *pg;
519
520         if (enable)
521                 old = ftrace_nop_replace();
522         else
523                 new = ftrace_nop_replace();
524
525         for (pg = ftrace_pages_start; pg; pg = pg->next) {
526                 for (i = 0; i < pg->index; i++) {
527                         rec = &pg->records[i];
528
529                         /* don't modify code that has already faulted */
530                         if (rec->flags & FTRACE_FL_FAILED)
531                                 continue;
532
533                         /* ignore updates to this record's mcount site */
534                         if (get_kprobe((void *)rec->ip)) {
535                                 freeze_record(rec);
536                                 continue;
537                         } else {
538                                 unfreeze_record(rec);
539                         }
540
541                         failed = __ftrace_replace_code(rec, old, new, enable);
542                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
543                                 rec->flags |= FTRACE_FL_FAILED;
544                                 if ((system_state == SYSTEM_BOOTING) ||
545                                     !core_kernel_text(rec->ip)) {
546                                         ftrace_del_hash(rec);
547                                         ftrace_free_rec(rec);
548                                 }
549                         }
550                 }
551         }
552 }
553
554 static void ftrace_shutdown_replenish(void)
555 {
556         if (ftrace_pages->next)
557                 return;
558
559         /* allocate another page */
560         ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
561 }
562
563 static void print_ip_ins(const char *fmt, unsigned char *p)
564 {
565         int i;
566
567         printk(KERN_CONT "%s", fmt);
568
569         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
570                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
571 }
572
573 static int
574 ftrace_code_disable(struct dyn_ftrace *rec)
575 {
576         unsigned long ip;
577         unsigned char *nop, *call;
578         int ret;
579
580         ip = rec->ip;
581
582         nop = ftrace_nop_replace();
583         call = ftrace_call_replace(ip, mcount_addr);
584
585         ret = ftrace_modify_code(ip, call, nop);
586         if (ret) {
587                 switch (ret) {
588                 case -EFAULT:
589                         FTRACE_WARN_ON_ONCE(1);
590                         pr_info("ftrace faulted on modifying ");
591                         print_ip_sym(ip);
592                         break;
593                 case -EINVAL:
594                         FTRACE_WARN_ON_ONCE(1);
595                         pr_info("ftrace failed to modify ");
596                         print_ip_sym(ip);
597                         print_ip_ins(" expected: ", call);
598                         print_ip_ins(" actual: ", (unsigned char *)ip);
599                         print_ip_ins(" replace: ", nop);
600                         printk(KERN_CONT "\n");
601                         break;
602                 case -EPERM:
603                         FTRACE_WARN_ON_ONCE(1);
604                         pr_info("ftrace faulted on writing ");
605                         print_ip_sym(ip);
606                         break;
607                 default:
608                         FTRACE_WARN_ON_ONCE(1);
609                         pr_info("ftrace faulted on unknown error ");
610                         print_ip_sym(ip);
611                 }
612
613                 rec->flags |= FTRACE_FL_FAILED;
614                 return 0;
615         }
616         return 1;
617 }
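
/*
 * For a concrete instance: on x86 the compiler emits a 5-byte
 * "call mcount" (0xe8 plus a rel32) at each mcount site, and
 * MCOUNT_INSN_SIZE is 5, so the call above asks ftrace_modify_code()
 * to verify that those 5 bytes still match the expected call
 * instruction before overwriting them with a 5-byte nop.
 */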
618
619 static int ftrace_update_code(void *ignore);
620
621 static int __ftrace_modify_code(void *data)
622 {
623         unsigned long addr;
624         int *command = data;
625
626         if (*command & FTRACE_ENABLE_CALLS) {
627                 /*
628                  * Update any recorded ips now that we have the
629                  * machine stopped
630                  */
631                 ftrace_update_code(NULL);
632                 ftrace_replace_code(1);
633                 tracing_on = 1;
634         } else if (*command & FTRACE_DISABLE_CALLS) {
635                 ftrace_replace_code(0);
636                 tracing_on = 0;
637         }
638
639         if (*command & FTRACE_UPDATE_TRACE_FUNC)
640                 ftrace_update_ftrace_func(ftrace_trace_function);
641
642         if (*command & FTRACE_ENABLE_MCOUNT) {
643                 addr = (unsigned long)ftrace_record_ip;
644                 ftrace_mcount_set(&addr);
645         } else if (*command & FTRACE_DISABLE_MCOUNT) {
646                 addr = (unsigned long)ftrace_stub;
647                 ftrace_mcount_set(&addr);
648         }
649
650         return 0;
651 }
652
653 static void ftrace_run_update_code(int command)
654 {
655         stop_machine(__ftrace_modify_code, &command, NULL);
656 }
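
/*
 * stop_machine() runs __ftrace_modify_code() while every other CPU
 * spins with interrupts disabled, so the kernel text can be patched
 * without another CPU executing an instruction mid-rewrite.
 */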
657
658 static ftrace_func_t saved_ftrace_func;
659 static int ftrace_start;
660 static DEFINE_MUTEX(ftrace_start_lock);
661
662 static void ftrace_startup(void)
663 {
664         int command = 0;
665
666         if (unlikely(ftrace_disabled))
667                 return;
668
669         mutex_lock(&ftrace_start_lock);
670         ftrace_start++;
671         if (ftrace_start == 1)
672                 command |= FTRACE_ENABLE_CALLS;
673
674         if (saved_ftrace_func != ftrace_trace_function) {
675                 saved_ftrace_func = ftrace_trace_function;
676                 command |= FTRACE_UPDATE_TRACE_FUNC;
677         }
678
679         if (!command || !ftrace_enabled)
680                 goto out;
681
682         ftrace_run_update_code(command);
683  out:
684         mutex_unlock(&ftrace_start_lock);
685 }
686
687 static void ftrace_shutdown(void)
688 {
689         int command = 0;
690
691         if (unlikely(ftrace_disabled))
692                 return;
693
694         mutex_lock(&ftrace_start_lock);
695         ftrace_start--;
696         if (!ftrace_start)
697                 command |= FTRACE_DISABLE_CALLS;
698
699         if (saved_ftrace_func != ftrace_trace_function) {
700                 saved_ftrace_func = ftrace_trace_function;
701                 command |= FTRACE_UPDATE_TRACE_FUNC;
702         }
703
704         if (!command || !ftrace_enabled)
705                 goto out;
706
707         ftrace_run_update_code(command);
708  out:
709         mutex_unlock(&ftrace_start_lock);
710 }
711
712 static void ftrace_startup_sysctl(void)
713 {
714         int command = FTRACE_ENABLE_MCOUNT;
715
716         if (unlikely(ftrace_disabled))
717                 return;
718
719         mutex_lock(&ftrace_start_lock);
720         /* Force update next time */
721         saved_ftrace_func = NULL;
722         /* ftrace_start is true if we want ftrace running */
723         if (ftrace_start)
724                 command |= FTRACE_ENABLE_CALLS;
725
726         ftrace_run_update_code(command);
727         mutex_unlock(&ftrace_start_lock);
728 }
729
730 static void ftrace_shutdown_sysctl(void)
731 {
732         int command = FTRACE_DISABLE_MCOUNT;
733
734         if (unlikely(ftrace_disabled))
735                 return;
736
737         mutex_lock(&ftrace_start_lock);
738         /* ftrace_start is true if ftrace is running */
739         if (ftrace_start)
740                 command |= FTRACE_DISABLE_CALLS;
741
742         ftrace_run_update_code(command);
743         mutex_unlock(&ftrace_start_lock);
744 }
745
746 static cycle_t          ftrace_update_time;
747 static unsigned long    ftrace_update_cnt;
748 unsigned long           ftrace_update_tot_cnt;
749
750 static int ftrace_update_code(void *ignore)
751 {
752         int i, save_ftrace_enabled;
753         cycle_t start, stop;
754         struct dyn_ftrace *p;
755         struct hlist_node *t, *n;
756         struct hlist_head *head, temp_list;
757
758         /* Don't be recording funcs now */
759         ftrace_record_suspend++;
760         save_ftrace_enabled = ftrace_enabled;
761         ftrace_enabled = 0;
762
763         start = ftrace_now(raw_smp_processor_id());
764         ftrace_update_cnt = 0;
765
766         /* No locks needed, the machine is stopped! */
767         for (i = 0; i < FTRACE_HASHSIZE; i++) {
768                 INIT_HLIST_HEAD(&temp_list);
769                 head = &ftrace_hash[i];
770
771                 /* all CPUS are stopped, we are safe to modify code */
772                 hlist_for_each_entry_safe(p, t, n, head, node) {
773                         /* Skip over failed records which have not been
774                          * freed. */
775                         if (p->flags & FTRACE_FL_FAILED)
776                                 continue;
777
778                         /* Unconverted records are always at the head of the
779                          * hash bucket. Once we encounter a converted record,
780                          * simply skip over to the next bucket. Saves us
781                          * some processor cycles (ftrace does its bit for
782                          * global warming :-p ). */
783                         if (p->flags & (FTRACE_FL_CONVERTED))
784                                 break;
785
786                         /* Ignore updates to this record's mcount site.
787                          * Reintroduce this record at the head of this
788                          * bucket to attempt to "convert" it again if
789                          * the kprobe on it is unregistered before the
790                          * next run. */
791                         if (get_kprobe((void *)p->ip)) {
792                                 ftrace_del_hash(p);
793                                 INIT_HLIST_NODE(&p->node);
794                                 hlist_add_head(&p->node, &temp_list);
795                                 freeze_record(p);
796                                 continue;
797                         } else {
798                                 unfreeze_record(p);
799                         }
800
801                         /* convert record (i.e, patch mcount-call with NOP) */
802                         if (ftrace_code_disable(p)) {
803                                 p->flags |= FTRACE_FL_CONVERTED;
804                                 ftrace_update_cnt++;
805                         } else {
806                                 if ((system_state == SYSTEM_BOOTING) ||
807                                     !core_kernel_text(p->ip)) {
808                                         ftrace_del_hash(p);
809                                         ftrace_free_rec(p);
810                                 }
811                         }
812                 }
813
814                 hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
815                         hlist_del(&p->node);
816                         INIT_HLIST_NODE(&p->node);
817                         hlist_add_head(&p->node, head);
818                 }
819         }
820
821         stop = ftrace_now(raw_smp_processor_id());
822         ftrace_update_time = stop - start;
823         ftrace_update_tot_cnt += ftrace_update_cnt;
824
825         ftrace_enabled = save_ftrace_enabled;
826         ftrace_record_suspend--;
827
828         return 0;
829 }
830
831 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
832 {
833         struct ftrace_page *pg;
834         int cnt;
835         int i;
836
837         /* allocate a few pages */
838         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
839         if (!ftrace_pages_start)
840                 return -1;
841
842         /*
843          * Allocate a few more pages.
844          *
845          * TODO: have some parser search vmlinux before
846          *   final linking to find all calls to ftrace.
847          *   Then we can:
848          *    a) know how many pages to allocate.
849          *     and/or
850          *    b) set up the table then.
851          *
852          *  The dynamic code is still necessary for
853          *  modules.
854          */
855
856         pg = ftrace_pages = ftrace_pages_start;
857
858         cnt = num_to_init / ENTRIES_PER_PAGE;
859         pr_info("ftrace: allocating %ld hash entries in %d pages\n",
860                 num_to_init, cnt);
861
862         for (i = 0; i < cnt; i++) {
863                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
864
865                 /* If we fail, we'll try later anyway */
866                 if (!pg->next)
867                         break;
868
869                 pg = pg->next;
870         }
871
872         return 0;
873 }
874
875 enum {
876         FTRACE_ITER_FILTER      = (1 << 0),
877         FTRACE_ITER_CONT        = (1 << 1),
878         FTRACE_ITER_NOTRACE     = (1 << 2),
879         FTRACE_ITER_FAILURES    = (1 << 3),
880 };
881
882 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
883
884 struct ftrace_iterator {
885         loff_t                  pos;
886         struct ftrace_page      *pg;
887         unsigned                idx;
888         unsigned                flags;
889         unsigned char           buffer[FTRACE_BUFF_MAX+1];
890         unsigned                buffer_idx;
891         unsigned                filtered;
892 };
893
894 static void *
895 t_next(struct seq_file *m, void *v, loff_t *pos)
896 {
897         struct ftrace_iterator *iter = m->private;
898         struct dyn_ftrace *rec = NULL;
899
900         (*pos)++;
901
902         /* should not be called from interrupt context */
903         spin_lock(&ftrace_lock);
904  retry:
905         if (iter->idx >= iter->pg->index) {
906                 if (iter->pg->next) {
907                         iter->pg = iter->pg->next;
908                         iter->idx = 0;
909                         goto retry;
910                 }
911         } else {
912                 rec = &iter->pg->records[iter->idx++];
913                 if ((rec->flags & FTRACE_FL_FREE) ||
914
915                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
916                      (rec->flags & FTRACE_FL_FAILED)) ||
917
918                     ((iter->flags & FTRACE_ITER_FAILURES) &&
919                      !(rec->flags & FTRACE_FL_FAILED)) ||
920
921                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
922                      !(rec->flags & FTRACE_FL_NOTRACE))) {
923                         rec = NULL;
924                         goto retry;
925                 }
926         }
927         spin_unlock(&ftrace_lock);
928
929         iter->pos = *pos;
930
931         return rec;
932 }
933
934 static void *t_start(struct seq_file *m, loff_t *pos)
935 {
936         struct ftrace_iterator *iter = m->private;
937         void *p = NULL;
938         loff_t l = -1;
939
940         if (*pos != iter->pos) {
941                 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
942                         ;
943         } else {
944                 l = *pos;
945                 p = t_next(m, p, &l);
946         }
947
948         return p;
949 }
950
951 static void t_stop(struct seq_file *m, void *p)
952 {
953 }
954
955 static int t_show(struct seq_file *m, void *v)
956 {
957         struct dyn_ftrace *rec = v;
958         char str[KSYM_SYMBOL_LEN];
959
960         if (!rec)
961                 return 0;
962
963         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
964
965         seq_printf(m, "%s\n", str);
966
967         return 0;
968 }
969
970 static struct seq_operations show_ftrace_seq_ops = {
971         .start = t_start,
972         .next = t_next,
973         .stop = t_stop,
974         .show = t_show,
975 };
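
/*
 * seq_file drives this iterator: each read() calls t_start() once,
 * then alternates t_show() and t_next() until the user buffer fills
 * or t_next() returns NULL, and finally calls t_stop().
 */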
976
977 static int
978 ftrace_avail_open(struct inode *inode, struct file *file)
979 {
980         struct ftrace_iterator *iter;
981         int ret;
982
983         if (unlikely(ftrace_disabled))
984                 return -ENODEV;
985
986         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
987         if (!iter)
988                 return -ENOMEM;
989
990         iter->pg = ftrace_pages_start;
991         iter->pos = -1;
992
993         ret = seq_open(file, &show_ftrace_seq_ops);
994         if (!ret) {
995                 struct seq_file *m = file->private_data;
996
997                 m->private = iter;
998         } else {
999                 kfree(iter);
1000         }
1001
1002         return ret;
1003 }
1004
1005 int ftrace_avail_release(struct inode *inode, struct file *file)
1006 {
1007         struct seq_file *m = (struct seq_file *)file->private_data;
1008         struct ftrace_iterator *iter = m->private;
1009
1010         seq_release(inode, file);
1011         kfree(iter);
1012
1013         return 0;
1014 }
1015
1016 static int
1017 ftrace_failures_open(struct inode *inode, struct file *file)
1018 {
1019         int ret;
1020         struct seq_file *m;
1021         struct ftrace_iterator *iter;
1022
1023         ret = ftrace_avail_open(inode, file);
1024         if (!ret) {
1025                 m = (struct seq_file *)file->private_data;
1026                 iter = (struct ftrace_iterator *)m->private;
1027                 iter->flags = FTRACE_ITER_FAILURES;
1028         }
1029
1030         return ret;
1031 }
1032
1034 static void ftrace_filter_reset(int enable)
1035 {
1036         struct ftrace_page *pg;
1037         struct dyn_ftrace *rec;
1038         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1039         unsigned i;
1040
1041         /* should not be called from interrupt context */
1042         spin_lock(&ftrace_lock);
1043         if (enable)
1044                 ftrace_filtered = 0;
1045         pg = ftrace_pages_start;
1046         while (pg) {
1047                 for (i = 0; i < pg->index; i++) {
1048                         rec = &pg->records[i];
1049                         if (rec->flags & FTRACE_FL_FAILED)
1050                                 continue;
1051                         rec->flags &= ~type;
1052                 }
1053                 pg = pg->next;
1054         }
1055         spin_unlock(&ftrace_lock);
1056 }
1057
1058 static int
1059 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1060 {
1061         struct ftrace_iterator *iter;
1062         int ret = 0;
1063
1064         if (unlikely(ftrace_disabled))
1065                 return -ENODEV;
1066
1067         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1068         if (!iter)
1069                 return -ENOMEM;
1070
1071         mutex_lock(&ftrace_regex_lock);
1072         if ((file->f_mode & FMODE_WRITE) &&
1073             !(file->f_flags & O_APPEND))
1074                 ftrace_filter_reset(enable);
1075
1076         if (file->f_mode & FMODE_READ) {
1077                 iter->pg = ftrace_pages_start;
1078                 iter->pos = -1;
1079                 iter->flags = enable ? FTRACE_ITER_FILTER :
1080                         FTRACE_ITER_NOTRACE;
1081
1082                 ret = seq_open(file, &show_ftrace_seq_ops);
1083                 if (!ret) {
1084                         struct seq_file *m = file->private_data;
1085                         m->private = iter;
1086                 } else
1087                         kfree(iter);
1088         } else
1089                 file->private_data = iter;
1090         mutex_unlock(&ftrace_regex_lock);
1091
1092         return ret;
1093 }
1094
1095 static int
1096 ftrace_filter_open(struct inode *inode, struct file *file)
1097 {
1098         return ftrace_regex_open(inode, file, 1);
1099 }
1100
1101 static int
1102 ftrace_notrace_open(struct inode *inode, struct file *file)
1103 {
1104         return ftrace_regex_open(inode, file, 0);
1105 }
1106
1107 static ssize_t
1108 ftrace_regex_read(struct file *file, char __user *ubuf,
1109                        size_t cnt, loff_t *ppos)
1110 {
1111         if (file->f_mode & FMODE_READ)
1112                 return seq_read(file, ubuf, cnt, ppos);
1113         else
1114                 return -EPERM;
1115 }
1116
1117 static loff_t
1118 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1119 {
1120         loff_t ret;
1121
1122         if (file->f_mode & FMODE_READ)
1123                 ret = seq_lseek(file, offset, origin);
1124         else
1125                 file->f_pos = ret = 1;
1126
1127         return ret;
1128 }
1129
1130 enum {
1131         MATCH_FULL,
1132         MATCH_FRONT_ONLY,
1133         MATCH_MIDDLE_ONLY,
1134         MATCH_END_ONLY,
1135 };
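
/*
 * How a written pattern maps onto a match type (see the parsing loop
 * in ftrace_match() below); "sched" is just an example name:
 *
 *   "sched"     MATCH_FULL         whole-symbol comparison
 *   "sched*"    MATCH_FRONT_ONLY   prefix comparison
 *   "*sched"    MATCH_END_ONLY     suffix comparison
 *   "*sched*"   MATCH_MIDDLE_ONLY  substring search
 */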
1136
1137 static void
1138 ftrace_match(unsigned char *buff, int len, int enable)
1139 {
1140         char str[KSYM_SYMBOL_LEN];
1141         char *search = NULL;
1142         struct ftrace_page *pg;
1143         struct dyn_ftrace *rec;
1144         int type = MATCH_FULL;
1145         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1146         unsigned i, match = 0, search_len = 0;
1147
1148         for (i = 0; i < len; i++) {
1149                 if (buff[i] == '*') {
1150                         if (!i) {
1151                                 search = buff + i + 1;
1152                                 type = MATCH_END_ONLY;
1153                                 search_len = len - (i + 1);
1154                         } else {
1155                                 if (type == MATCH_END_ONLY) {
1156                                         type = MATCH_MIDDLE_ONLY;
1157                                 } else {
1158                                         match = i;
1159                                         type = MATCH_FRONT_ONLY;
1160                                 }
1161                                 buff[i] = 0;
1162                                 break;
1163                         }
1164                 }
1165         }
1166
1167         /* should not be called from interrupt context */
1168         spin_lock(&ftrace_lock);
1169         if (enable)
1170                 ftrace_filtered = 1;
1171         pg = ftrace_pages_start;
1172         while (pg) {
1173                 for (i = 0; i < pg->index; i++) {
1174                         int matched = 0;
1175                         char *ptr;
1176
1177                         rec = &pg->records[i];
1178                         if (rec->flags & FTRACE_FL_FAILED)
1179                                 continue;
1180                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1181                         switch (type) {
1182                         case MATCH_FULL:
1183                                 if (strcmp(str, buff) == 0)
1184                                         matched = 1;
1185                                 break;
1186                         case MATCH_FRONT_ONLY:
1187                                 if (memcmp(str, buff, match) == 0)
1188                                         matched = 1;
1189                                 break;
1190                         case MATCH_MIDDLE_ONLY:
1191                                 if (strstr(str, search))
1192                                         matched = 1;
1193                                 break;
1194                         case MATCH_END_ONLY:
1195                                 ptr = strstr(str, search);
1196                                 if (ptr && (ptr[search_len] == 0))
1197                                         matched = 1;
1198                                 break;
1199                         }
1200                         if (matched)
1201                                 rec->flags |= flag;
1202                 }
1203                 pg = pg->next;
1204         }
1205         spin_unlock(&ftrace_lock);
1206 }
1207
1208 static ssize_t
1209 ftrace_regex_write(struct file *file, const char __user *ubuf,
1210                    size_t cnt, loff_t *ppos, int enable)
1211 {
1212         struct ftrace_iterator *iter;
1213         char ch;
1214         size_t read = 0;
1215         ssize_t ret;
1216
1217         if (!cnt) /* cnt is a size_t, so it can never be negative */
1218                 return 0;
1219
1220         mutex_lock(&ftrace_regex_lock);
1221
1222         if (file->f_mode & FMODE_READ) {
1223                 struct seq_file *m = file->private_data;
1224                 iter = m->private;
1225         } else
1226                 iter = file->private_data;
1227
1228         if (!*ppos) {
1229                 iter->flags &= ~FTRACE_ITER_CONT;
1230                 iter->buffer_idx = 0;
1231         }
1232
1233         ret = get_user(ch, ubuf++);
1234         if (ret)
1235                 goto out;
1236         read++;
1237         cnt--;
1238
1239         if (!(iter->flags & FTRACE_ITER_CONT)) {
1240                 /* skip white space */
1241                 while (cnt && isspace(ch)) {
1242                         ret = get_user(ch, ubuf++);
1243                         if (ret)
1244                                 goto out;
1245                         read++;
1246                         cnt--;
1247                 }
1248
1249                 if (isspace(ch)) {
1250                         file->f_pos += read;
1251                         ret = read;
1252                         goto out;
1253                 }
1254
1255                 iter->buffer_idx = 0;
1256         }
1257
1258         while (cnt && !isspace(ch)) {
1259                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1260                         iter->buffer[iter->buffer_idx++] = ch;
1261                 else {
1262                         ret = -EINVAL;
1263                         goto out;
1264                 }
1265                 ret = get_user(ch, ubuf++);
1266                 if (ret)
1267                         goto out;
1268                 read++;
1269                 cnt--;
1270         }
1271
1272         if (isspace(ch)) {
1273                 iter->filtered++;
1274                 iter->buffer[iter->buffer_idx] = 0;
1275                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1276                 iter->buffer_idx = 0;
1277         } else
1278                 iter->flags |= FTRACE_ITER_CONT;
1279
1280
1281         file->f_pos += read;
1282
1283         ret = read;
1284  out:
1285         mutex_unlock(&ftrace_regex_lock);
1286
1287         return ret;
1288 }
1289
1290 static ssize_t
1291 ftrace_filter_write(struct file *file, const char __user *ubuf,
1292                     size_t cnt, loff_t *ppos)
1293 {
1294         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1295 }
1296
1297 static ssize_t
1298 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1299                      size_t cnt, loff_t *ppos)
1300 {
1301         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1302 }
1303
1304 static void
1305 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1306 {
1307         if (unlikely(ftrace_disabled))
1308                 return;
1309
1310         mutex_lock(&ftrace_regex_lock);
1311         if (reset)
1312                 ftrace_filter_reset(enable);
1313         if (buf)
1314                 ftrace_match(buf, len, enable);
1315         mutex_unlock(&ftrace_regex_lock);
1316 }
1317
1318 /**
1319  * ftrace_set_filter - set a function to filter on in ftrace
1320  * @buf - the string that holds the function filter text.
1321  * @len - the length of the string.
1322  * @reset - non zero to reset all filters before applying this filter.
1323  *
1324  * Filters denote which functions should be enabled when tracing is enabled.
1325  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1326  */
1327 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1328 {
1329         ftrace_set_regex(buf, len, reset, 1);
1330 }
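
/*
 * A minimal sketch of in-kernel usage (the symbol is illustrative):
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * clears any previous filter and then limits tracing to schedule().
 */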
1331
1332 /**
1333  * ftrace_set_notrace - set a function to not trace in ftrace
1334  * @buf - the string that holds the function notrace text.
1335  * @len - the length of the string.
1336  * @reset - non zero to reset all filters before applying this filter.
1337  *
1338  * Notrace Filters denote which functions should not be enabled when tracing
1339  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1340  * for tracing.
1341  */
1342 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1343 {
1344         ftrace_set_regex(buf, len, reset, 0);
1345 }
1346
1347 static int
1348 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1349 {
1350         struct seq_file *m = (struct seq_file *)file->private_data;
1351         struct ftrace_iterator *iter;
1352
1353         mutex_lock(&ftrace_regex_lock);
1354         if (file->f_mode & FMODE_READ) {
1355                 iter = m->private;
1356
1357                 seq_release(inode, file);
1358         } else
1359                 iter = file->private_data;
1360
1361         if (iter->buffer_idx) {
1362                 iter->filtered++;
1363                 iter->buffer[iter->buffer_idx] = 0;
1364                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1365         }
1366
1367         mutex_lock(&ftrace_sysctl_lock);
1368         mutex_lock(&ftrace_start_lock);
1369         if (iter->filtered && ftrace_start && ftrace_enabled)
1370                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1371         mutex_unlock(&ftrace_start_lock);
1372         mutex_unlock(&ftrace_sysctl_lock);
1373
1374         kfree(iter);
1375         mutex_unlock(&ftrace_regex_lock);
1376         return 0;
1377 }
1378
1379 static int
1380 ftrace_filter_release(struct inode *inode, struct file *file)
1381 {
1382         return ftrace_regex_release(inode, file, 1);
1383 }
1384
1385 static int
1386 ftrace_notrace_release(struct inode *inode, struct file *file)
1387 {
1388         return ftrace_regex_release(inode, file, 0);
1389 }
1390
1391 static struct file_operations ftrace_avail_fops = {
1392         .open = ftrace_avail_open,
1393         .read = seq_read,
1394         .llseek = seq_lseek,
1395         .release = ftrace_avail_release,
1396 };
1397
1398 static struct file_operations ftrace_failures_fops = {
1399         .open = ftrace_failures_open,
1400         .read = seq_read,
1401         .llseek = seq_lseek,
1402         .release = ftrace_avail_release,
1403 };
1404
1405 static struct file_operations ftrace_filter_fops = {
1406         .open = ftrace_filter_open,
1407         .read = ftrace_regex_read,
1408         .write = ftrace_filter_write,
1409         .llseek = ftrace_regex_lseek,
1410         .release = ftrace_filter_release,
1411 };
1412
1413 static struct file_operations ftrace_notrace_fops = {
1414         .open = ftrace_notrace_open,
1415         .read = ftrace_regex_read,
1416         .write = ftrace_notrace_write,
1417         .llseek = ftrace_regex_lseek,
1418         .release = ftrace_notrace_release,
1419 };
1420
1421 static __init int ftrace_init_debugfs(void)
1422 {
1423         struct dentry *d_tracer;
1424         struct dentry *entry;
1425
1426         d_tracer = tracing_init_dentry();
1427
1428         entry = debugfs_create_file("available_filter_functions", 0444,
1429                                     d_tracer, NULL, &ftrace_avail_fops);
1430         if (!entry)
1431                 pr_warning("Could not create debugfs "
1432                            "'available_filter_functions' entry\n");
1433
1434         entry = debugfs_create_file("failures", 0444,
1435                                     d_tracer, NULL, &ftrace_failures_fops);
1436         if (!entry)
1437                 pr_warning("Could not create debugfs 'failures' entry\n");
1438
1439         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1440                                     NULL, &ftrace_filter_fops);
1441         if (!entry)
1442                 pr_warning("Could not create debugfs "
1443                            "'set_ftrace_filter' entry\n");
1444
1445         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1446                                     NULL, &ftrace_notrace_fops);
1447         if (!entry)
1448                 pr_warning("Could not create debugfs "
1449                            "'set_ftrace_notrace' entry\n");
1450
1451         return 0;
1452 }
1453
1454 fs_initcall(ftrace_init_debugfs);
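
/*
 * From userspace, with debugfs mounted (conventionally on /debug or
 * /sys/kernel/debug), the files created above are driven e.g. as:
 *
 *	cat <debugfs>/tracing/available_filter_functions
 *	echo 'sched*' > <debugfs>/tracing/set_ftrace_filter
 *	echo schedule > <debugfs>/tracing/set_ftrace_notrace
 *
 * Opening with O_APPEND (shell '>>') adds to the existing set instead
 * of clearing it first, per ftrace_regex_open() above.
 */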
1455
1456 static int ftrace_convert_nops(unsigned long *start,
1457                                unsigned long *end)
1458 {
1459         unsigned long *p;
1460         unsigned long addr;
1461         unsigned long flags;
1462
1463         p = start;
1464         while (p < end) {
1465                 addr = ftrace_call_adjust(*p++);
1466                 /* should not be called from interrupt context */
1467                 spin_lock(&ftrace_lock);
1468                 ftrace_record_ip(addr);
1469                 spin_unlock(&ftrace_lock);
1470                 ftrace_shutdown_replenish();
1471         }
1472
1473         /* p is ignored */
1474         local_irq_save(flags);
1475         ftrace_update_code(p);
1476         local_irq_restore(flags);
1477
1478         return 0;
1479 }
1480
1481 void ftrace_init_module(unsigned long *start, unsigned long *end)
1482 {
1483         if (ftrace_disabled || start == end)
1484                 return;
1485         ftrace_convert_nops(start, end);
1486 }
1487
1488 extern unsigned long __start_mcount_loc[];
1489 extern unsigned long __stop_mcount_loc[];
1490
1491 void __init ftrace_init(void)
1492 {
1493         unsigned long count, addr, flags;
1494         int ret;
1495
1496         /* Keep the ftrace pointer to the stub */
1497         addr = (unsigned long)ftrace_stub;
1498
1499         local_irq_save(flags);
1500         ftrace_dyn_arch_init(&addr);
1501         local_irq_restore(flags);
1502
1503         /* ftrace_dyn_arch_init places the return code in addr */
1504         if (addr)
1505                 goto failed;
1506
1507         count = __stop_mcount_loc - __start_mcount_loc;
1508
1509         ret = ftrace_dyn_table_alloc(count);
1510         if (ret)
1511                 goto failed;
1512
1513         last_ftrace_enabled = ftrace_enabled = 1;
1514
1515         ret = ftrace_convert_nops(__start_mcount_loc,
1516                                   __stop_mcount_loc);
1517
1518         return;
1519  failed:
1520         ftrace_disabled = 1;
1521 }
1522
1523 #else
1524 # define ftrace_startup()               do { } while (0)
1525 # define ftrace_shutdown()              do { } while (0)
1526 # define ftrace_startup_sysctl()        do { } while (0)
1527 # define ftrace_shutdown_sysctl()       do { } while (0)
1528 #endif /* CONFIG_DYNAMIC_FTRACE */
1529
1530 /**
1531  * ftrace_kill - kill ftrace
1532  *
1533  * This function should be used by panic code. It stops ftrace
1534  * but in a not so nice way: tracing is disabled immediately,
1535  * with no synchronization against tracers that are running.
1536  */
1537 void ftrace_kill(void)
1538 {
1539         ftrace_disabled = 1;
1540         ftrace_enabled = 0;
1541         clear_ftrace_function();
1542 }
1543
1544 /**
1545  * register_ftrace_function - register a function for profiling
1546  * @ops - ops structure that holds the function for profiling.
1547  *
1548  * Register a function to be called by all functions in the
1549  * kernel.
1550  *
1551  * Note: @ops->func and all the functions it calls must be labeled
1552  *       with "notrace", otherwise it will go into a
1553  *       recursive loop.
1554  */
1555 int register_ftrace_function(struct ftrace_ops *ops)
1556 {
1557         int ret;
1558
1559         if (unlikely(ftrace_disabled))
1560                 return -1;
1561
1562         mutex_lock(&ftrace_sysctl_lock);
1563         ret = __register_ftrace_function(ops);
1564         ftrace_startup();
1565         mutex_unlock(&ftrace_sysctl_lock);
1566
1567         return ret;
1568 }
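
/*
 * A minimal sketch of registering a callback (names are illustrative):
 *
 *	static unsigned long my_hits;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		my_hits++;	(everything called here must be notrace)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * Every traced function then calls my_trace_func(); pair it with
 * unregister_ftrace_function(&my_ops) to stop.
 */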
1569
1570 /**
1571  * unregister_ftrace_function - unregister a function for profiling.
1572  * @ops - ops structure that holds the function to unregister
1573  *
1574  * Unregister a function that was added to be called by ftrace profiling.
1575  */
1576 int unregister_ftrace_function(struct ftrace_ops *ops)
1577 {
1578         int ret;
1579
1580         mutex_lock(&ftrace_sysctl_lock);
1581         ret = __unregister_ftrace_function(ops);
1582         ftrace_shutdown();
1583         mutex_unlock(&ftrace_sysctl_lock);
1584
1585         return ret;
1586 }
1587
1588 int
1589 ftrace_enable_sysctl(struct ctl_table *table, int write,
1590                      struct file *file, void __user *buffer, size_t *lenp,
1591                      loff_t *ppos)
1592 {
1593         int ret;
1594
1595         if (unlikely(ftrace_disabled))
1596                 return -ENODEV;
1597
1598         mutex_lock(&ftrace_sysctl_lock);
1599
1600         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1601
1602         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1603                 goto out;
1604
1605         last_ftrace_enabled = ftrace_enabled;
1606
1607         if (ftrace_enabled) {
1608
1609                 ftrace_startup_sysctl();
1610
1611                 /* we are starting ftrace again */
1612                 if (ftrace_list != &ftrace_list_end) {
1613                         if (ftrace_list->next == &ftrace_list_end)
1614                                 ftrace_trace_function = ftrace_list->func;
1615                         else
1616                                 ftrace_trace_function = ftrace_list_func;
1617                 }
1618
1619         } else {
1620                 /* stopping ftrace calls (just send to ftrace_stub) */
1621                 ftrace_trace_function = ftrace_stub;
1622
1623                 ftrace_shutdown_sysctl();
1624         }
1625
1626  out:
1627         mutex_unlock(&ftrace_sysctl_lock);
1628         return ret;
1629 }