}
 
 notrace void
-__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-        unsigned long ip, unsigned long parent_ip, unsigned long flags)
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+              unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
        struct trace_entry *entry;
        unsigned long irq_flags;
 
 notrace void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
        if (likely(!atomic_read(&data->disabled)))
-               __ftrace(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags);
 }
 
 notrace void
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               __ftrace(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags);
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
 
                   unsigned long arg1,
                   unsigned long arg2,
                   unsigned long arg3);
+void trace_function(struct trace_array *tr,
+                   struct trace_array_cpu *data,
+                   unsigned long ip,
+                   unsigned long parent_ip,
+                   unsigned long flags);
 
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
 
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               ftrace(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags);
 
        atomic_dec(&data->disabled);
 }
        if (!report_latency(delta))
                goto out_unlock;
 
-       ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 
        latency = nsecs_to_usecs(delta);
 
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(data);
-       ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
 static inline void notrace
 
        local_save_flags(flags);
 
-       ftrace(tr, data, ip, parent_ip, flags);
+       trace_function(tr, data, ip, parent_ip, flags);
 
        __get_cpu_var(tracing_cpu) = 1;
 
 
        atomic_inc(&data->disabled);
        local_save_flags(flags);
-       ftrace(tr, data, ip, parent_ip, flags);
+       trace_function(tr, data, ip, parent_ip, flags);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
 
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;
 
-       ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+       trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
        /*
         * usecs conversion is slow so we try to delay the conversion
        local_save_flags(flags);
 
        tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-       ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
+       trace_function(tr, tr->data[wakeup_cpu],
+                      CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
        spin_unlock(&wakeup_lock);
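
A usage sketch (not part of the patch): every call site above follows the same
protocol around the renamed trace_function() API -- bump the per-CPU disabled
counter, record the entry only if this caller is the sole writer, then drop the
counter again.  A minimal tracer callback built on that protocol would look
like the following; the names my_tracer_call and my_trace_array are
illustrative assumptions, everything else mirrors the hunks above.

	/* Sketch only -- assumes the kernel-internal types used in this patch. */
	static struct trace_array *my_trace_array;	/* hypothetical, set at tracer init */

	static notrace void
	my_tracer_call(unsigned long ip, unsigned long parent_ip)
	{
		struct trace_array *tr = my_trace_array;
		struct trace_array_cpu *data;
		unsigned long flags;
		long disabled;
		int cpu;

		local_irq_save(flags);		/* keep cpu and data stable */
		cpu = raw_smp_processor_id();
		data = tr->data[cpu];

		/* Reentrancy guard: trace only when we are the sole writer. */
		disabled = atomic_inc_return(&data->disabled);
		if (likely(disabled == 1))
			trace_function(tr, data, ip, parent_ip, flags);

		atomic_dec(&data->disabled);
		local_irq_restore(flags);
	}

Note the design choice visible in both the sketch and the patch: the guard is a
counter, not a flag, so a recursive entry (say, from an interrupt that fires
mid-trace) sees disabled > 1 and skips recording instead of corrupting the
per-CPU buffer.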