/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>	/* for P6_NOP5, used below */
/* Long is fine, even if it is only 4 bytes ;-) */
static long *ftrace_nop;
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;	/* call opcode byte (0xe8) */
		int offset;	/* signed 32-bit displacement */
	} __attribute__((packed));
};
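/*
 * Layout sketch (editorial illustration, assuming MCOUNT_INSN_SIZE == 5
 * as on x86): setting e8 = 0xe8 and offset = <rel32> makes 'code' read
 * back as the bytes "e8 xx xx xx xx", i.e. a near call with a
 * little-endian 32-bit displacement, which is exactly what
 * ftrace_call_replace() below assembles.
 */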
static int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}
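/*
 * Callers pass ip + MCOUNT_INSN_SIZE here: a relative call's
 * displacement is measured from the end of the call instruction
 * (the address of the next instruction), not from the opcode itself.
 */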
notrace unsigned char *ftrace_nop_replace(void)
{
	return (unsigned char *)ftrace_nop;
}
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;	/* opcode of a near relative call */
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
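/*
 * A worked example with made-up addresses:
 * ftrace_call_replace(0xc0100000, 0xc0100f00) computes
 *   offset = 0xc0100f00 - (0xc0100000 + 5) = 0x00000efb
 * and hands back the five bytes e8 fb 0e 00 00.
 */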
notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code; /* 4 bytes */
	unsigned new = *(unsigned *)new_code; /* 4 bytes */
	unsigned char newch = new_code[4];
	int faulted = 0;

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
	asm volatile (
		"1: lock\n"
		"   cmpxchg %3, (%2)\n"	/* replace the first 4 bytes if they match 'old' */
		"   jnz 2f\n"
		"   movb %b4, 4(%2)\n"	/* then write the 5th byte */
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3:	movl $1, %0\n"	/* faulted on the text */
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: "=r"(faulted), "=a"(replaced)
		: "r"(ip), "r"(new), "c"(newch),
		  "0"(faulted), "a"(old)
		: "memory");
	sync_core();

	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
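/*
 * Result convention (per the checks above): 0 means the expected old
 * bytes were found and replaced, 1 means the write faulted (e.g. the
 * text belonged to an unloaded module or freed __init code), and 2
 * means the site held neither the old nor the new bytes, i.e.
 * something else has modified the code.
 */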
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
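/*
 * Usage sketch: the ftrace core invokes this under kstop_machine to
 * retarget the call at the ftrace_call site in the mcount trampoline,
 * e.g. switching it from the default stub to a registered tracer's
 * handler.
 */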
notrace int ftrace_mcount_set(unsigned long *data)
{
	/* mcount is initialized as a nop */
	*data = 0;
	return 0;
}
int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;
	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * go to a less efficient 5-byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop,
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
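	/*
	 * For reference, the three candidate 5-byte sequences tried below
	 * (the first two byte patterns are echoed by the pr_info lines at
	 * the end of this function; eb 03 is a short jmp over 3 padding
	 * bytes):
	 *   P6 nop:        0f 1f 44 00 00
	 *   prefixed nop:  66 66 66 66 90
	 *   jmp fallback:  eb 03 <3 unused bytes>
	 */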
142 "jmp ftrace_test_jmp\n"
143 /* This code needs to stay around */
144 ".section .text, \"ax\"\n"
146 "jmp ftrace_test_p6nop\n"
147 ".byte 0x00,0x00,0x00\n" /* 2 byte jmp + 3 bytes */
152 ".byte 0x66,0x66,0x66,0x66,0x90\n"
156 ".section .fixup, \"ax\"\n"
158 " jmp ftrace_test_nop5\n"
162 _ASM_EXTABLE(ftrace_test_p6nop, 2b)
163 _ASM_EXTABLE(ftrace_test_nop5, 3b)
164 : "=r"(faulted) : "0" (faulted));
	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		ftrace_nop = (long *)ftrace_test_p6nop;
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		ftrace_nop = (long *)ftrace_test_nop5;
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp 1f\n");
		ftrace_nop = (long *)ftrace_test_jmp;
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
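/*
 * End-to-end sketch (editorial, with hypothetical addresses): to stop
 * tracing a call site, the ftrace core takes the record's ip, builds
 * old = ftrace_call_replace(ip, <mcount address>) and
 * new = ftrace_nop_replace(), then calls ftrace_modify_code(ip, old, new);
 * enabling a site swaps the roles of old and new.
 */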