/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

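/*
 * Walk the list of registered ftrace_ops and call every handler in
 * turn.  This is installed as ftrace_trace_function whenever more than
 * one ftrace_ops is registered.
 */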
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub and in essence stops
 * tracing.  There may be a short lag before all CPUs stop calling
 * the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

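/*
 * Add @ops at the head of ftrace_list.  If tracing is enabled, point
 * ftrace_trace_function either directly at the single handler or at
 * the list-walking ftrace_list_func().  Called with ftrace_sysctl_lock
 * held by register_ftrace_function().
 */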
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

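/*
 * Remove @ops from ftrace_list; returns -1 if it was not found.  When
 * only one handler remains, switch back to calling it directly.
 */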
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, i.e. done at run time by the callers themselves
 * rather than recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
        spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)flags; } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

static struct task_struct *ftraced_task;

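/* command bits handed to __ftrace_modify_code() under stop_machine() */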
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

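/*
 * Return nonzero if @ip belongs to a frozen record (one whose mcount
 * call site sits under a kprobe) that must not be traced right now;
 * zero otherwise.
 */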
int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

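/*
 * Put @rec on the free list.  Freed records are chained together
 * through their ip field, so a record on the list holds no valid
 * address until it is reused.
 */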
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}

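/*
 * Allocate a dyn_ftrace record: reuse an entry from the free list when
 * one is available, otherwise take the next free slot in ftrace_pages.
 * Returns NULL when both are exhausted.
 */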
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        ftrace_hash_lock(flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        ftrace_hash_unlock(flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

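/*
 * Patch the mcount call at @rec->ip back to a NOP.  Returns 1 on
 * success; on failure the record is flagged FTRACE_FL_FAILED and 0
 * is returned.
 */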
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

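/*
 * ftraced_suspend counts the users that want the call sites enabled.
 * The first ftrace_startup() turns them on; the matching final
 * ftrace_shutdown() turns them off again.
 */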
static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUs are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /* Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run. */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e., patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine(__ftrace_update_code, NULL, NULL);

        return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld hash entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

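/*
 * seq_file iterator over every recorded function.  t_next() skips
 * freed records and, depending on iter->flags, either failed or
 * non-failed entries, and records lacking the notrace flag.
 */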
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

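/*
 * Mark every recorded function that matches @buff with the filter or
 * notrace flag (selected by @enable).  A single '*' wildcard is
 * understood: "foo*" matches prefixes, "*foo" suffixes and "*foo*"
 * substrings.
 */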
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

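/*
 * Example (illustrative sketch only, not part of the original file):
 * restrict tracing to functions starting with "sched_", then reset so
 * everything is traced again:
 *
 *        ftrace_set_filter("sched_*", 7, 1);
 *        ...
 *        ftrace_set_filter(NULL, 0, 1);
 */
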
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
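/*
 * Record every mcount call site listed between @start and @end (the
 * __mcount_loc table emitted at build time), then convert them all to
 * NOPs via __ftrace_update_code() with interrupts disabled.
 */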
static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                /* should not be called from interrupt context */
                spin_lock(&ftrace_lock);
                ftrace_record_ip(addr);
                spin_unlock(&ftrace_lock);
                ftrace_shutdown_replenish();
        }

        /* p is ignored */
        local_irq_save(flags);
        __ftrace_update_code(p);
        local_irq_restore(flags);

        return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine(ftrace_dyn_arch_init, &addr, NULL);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc(NR_TO_INIT);
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
        ftraced_suspend = -1;
#endif
        clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure: if an anomaly is detected, calling this
 * function keeps ftrace from making any further modifications or
 * updates.  It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

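/*
 * Example (illustrative sketch only; my_trace_func and my_ops are
 * placeholder names, not part of this file):
 *
 *        static void notrace my_trace_func(unsigned long ip,
 *                                          unsigned long parent_ip)
 *        {
 *                ...
 *        }
 *
 *        static struct ftrace_ops my_ops __read_mostly = {
 *                .func = my_trace_func,
 *        };
 *
 *        register_ftrace_function(&my_ops);
 */
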
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}