/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

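/*
 * Walk the list of registered ftrace_ops and call each handler in
 * turn.  This is what ftrace_trace_function points at whenever more
 * than one ftrace_ops is registered.
 */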
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before all CPUs stop
 * calling the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

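/*
 * Add @ops to the head of ftrace_list and, if tracing is enabled,
 * point ftrace_trace_function either directly at the new handler
 * (single entry) or at the list walker (multiple entries).
 */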
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

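/*
 * Remove @ops from ftrace_list.  Returns -1 if @ops was never
 * registered.  When a single handler remains, it is called directly
 * again instead of going through the list walker.
 */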
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, when the call sites record themselves
 * at run time instead of being recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
                        spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused with a reference read from the code while
 * parsing the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

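/*
 * Ask whether the call site at @ip should be skipped.  A site is
 * skipped when its record is frozen (a kprobe sits on it) and
 * patching it would be unsafe or pointless: the record has failed,
 * is unconverted, tracing is off, or the filter excludes it.
 */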
int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

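/*
 * Return a record to the free list.  The ip field doubles as the
 * "next" pointer while the record sits on ftrace_free_records.
 */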
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

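/*
 * The dynamic mcount hook: record the call site @ip in the hash so
 * the update code can later patch it.  Guards against recursion via
 * a per-CPU counter and against preemption while touching the hash.
 */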
static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        ftrace_hash_lock(flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        ftrace_hash_unlock(flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

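/*
 * Decide whether the call site in @rec should be patched, honouring
 * the filter and notrace flags, and flip FTRACE_FL_ENABLED to match.
 * Returns the result of ftrace_modify_code(), or 0 if nothing to do.
 */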
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {
                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

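/*
 * Walk every recorded call site and enable or disable it.  Sites
 * with a kprobe installed are frozen and left untouched; sites that
 * fail to patch are flagged FTRACE_FL_FAILED and, during boot or
 * for non-core kernel text, freed outright.
 */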
static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

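/*
 * Convert the mcount call at @rec->ip into a NOP.  On failure the
 * record is marked FTRACE_FL_FAILED and diagnostics are printed.
 * Returns 1 on success, 0 on failure.
 */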
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, mcount_addr);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                switch (failed) {
                case 1:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on modifying ");
                        print_ip_sym(ip);
                        break;
                case 2:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace failed to modify ");
                        print_ip_sym(ip);
                        print_ip_ins(" expected: ", call);
                        print_ip_ins(" actual: ", (unsigned char *)ip);
                        print_ip_ins(" replace: ", nop);
                        printk(KERN_CONT "\n");
                        break;
                }

                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

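/*
 * Runs under stop_machine(): with every other CPU halted we can
 * safely update recorded ips, patch call sites and switch the
 * active trace function according to *command.
 */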
static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

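/*
 * ftrace_startup()/ftrace_shutdown() are refcounted via
 * ftraced_suspend: the first user enables the calls, the last
 * one disables them again.
 */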
static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

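/*
 * Runs with the machine stopped (or irqs disabled during boot):
 * convert every newly hashed call site into a NOP.  Sites with a
 * kprobe installed are parked on a temporary list and retried on
 * the next pass.
 */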
static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /* Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run. */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e., patch the mcount call with a NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine(__ftrace_update_code, NULL, NULL);

        return 1;
}

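/*
 * Pre-allocate pages of dyn_ftrace records, roughly enough for
 * @num_to_init entries.  Allocation failures here are tolerated;
 * ftrace_shutdown_replenish() will try again later.
 */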
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %lu hash entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

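/*
 * seq_file iterator for the debugfs listings: step through the
 * record pages, skipping entries that do not match the iterator
 * flags (failures only, filter only, notrace only).
 */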
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

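/*
 * Parse a single '*' wildcard out of @buff and set the matching
 * flag (FILTER or NOTRACE) on every record whose symbol name
 * matches.  "foo*", "*foo", "*foo*" and exact names are supported.
 */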
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

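/*
 * Common write handler for set_ftrace_filter and set_ftrace_notrace:
 * accumulate characters into iter->buffer and feed each whitespace-
 * terminated word to ftrace_match().  FTRACE_ITER_CONT marks a word
 * continued across writes.
 */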
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
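/*
 * With CONFIG_FTRACE_MCOUNT_RECORD the mcount call sites are
 * collected at build time into the __mcount_loc section, so they
 * can be converted in one pass here instead of being discovered at
 * run time by the ftraced daemon below.
 */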
static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                /* should not be called from interrupt context */
                spin_lock(&ftrace_lock);
                ftrace_record_ip(addr);
                spin_unlock(&ftrace_lock);
                ftrace_shutdown_replenish();
        }

        /* p is ignored */
        local_irq_save(flags);
        __ftrace_update_code(p);
        local_irq_restore(flags);

        return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
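/*
 * Without build-time mcount records, call sites are discovered as
 * the code runs.  The ftraced daemon wakes up once a second and,
 * if new sites were hashed, converts them via kstop_machine.
 */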
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine(ftrace_dyn_arch_init, &addr, NULL);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc(NR_TO_INIT);
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
        ftraced_suspend = -1;
#endif
        clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates.
 * Used when something went wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}