Say N if unsure.
 
+config TRACING_UNLIKELY
+       bool
+       help
+         Selected by tracers that will trace the likely and unlikely
+         conditions. This prevents the tracers themselves from being
+         profiled. Profiling the tracing infrastructure can only happen
+         when the likely()/unlikely() calls are not being traced.
+
+config UNLIKELY_TRACER
+       bool "Trace likely/unlikely instances"
+       depends on TRACE_UNLIKELY_PROFILE
+       select TRACING_UNLIKELY
+       help
+         This traces each likely and unlikely condition call in the
+         kernel as an event.  The difference between this and the
+         "Trace likely/unlikely profiler" is that this is not a
+         histogram of the callers, but actually places the calling
+         events into a running trace buffer to see when and where the
+         events happened, as well as their results.
+
+         Say N if unsure.
+
 config STACK_TRACER
        bool "Trace max stack"
        depends on HAVE_FUNCTION_TRACER
 
 obj-y += trace_selftest_dynamic.o
 endif
 
+# If unlikely tracing is enabled, do not trace these files: redefine
+# likely()/unlikely() to their _notrace variants so that the tracer's
+# own branches are not profiled or traced.
+ifdef CONFIG_TRACING_UNLIKELY
+KBUILD_CFLAGS += '-Dlikely(x)=likely_notrace(x)'
+KBUILD_CFLAGS += '-Dunlikely(x)=unlikely_notrace(x)'
+endif
+
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 
        "sched-tree",
        "ftrace_printk",
        "ftrace_preempt",
+#ifdef CONFIG_UNLIKELY_TRACER
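+       /* toggled at runtime via the "unlikely" trace option flag */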
+       "unlikely",
+#endif
        NULL
 };
 
                        trace_seq_print_cont(s, iter);
                break;
        }
+       case TRACE_UNLIKELY: {
+               struct trace_unlikely *field;
+
+               trace_assign_type(field, entry);
+
+               trace_seq_printf(s, "[%s] %s:%s:%d\n",
+                                field->correct ? "correct" : "INCORRECT",
+                                field->func,
+                                field->file,
+                                field->line);
+               break;
+       }
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
                return print_return_function(iter);
                break;
        }
+       case TRACE_UNLIKELY: {
+               struct trace_unlikely *field;
+
+               trace_assign_type(field, entry);
+
+               trace_seq_printf(s, "[%s] %s:%s:%d\n",
+                                field->correct ? "correct" : "INCORRECT",
+                                field->func,
+                                field->file,
+                                field->line);
+               break;
+       }
        }
        return TRACE_TYPE_HANDLED;
 }
        if (t == current_trace)
                goto out;
 
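+       /*
+        * Stop likely/unlikely tracing across the switch so no branch
+        * events are recorded while the old tracer is reset and the new
+        * one initializes.
+        */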
+       trace_unlikely_disable();
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
 
        if (t->init)
                t->init(tr);
 
+       trace_unlikely_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
 
 
        TRACE_SPECIAL,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
+       TRACE_UNLIKELY,
        TRACE_BOOT_CALL,
        TRACE_BOOT_RET,
        TRACE_FN_RET,
        struct boot_trace_ret boot_ret;
 };
 
+#define TRACE_FUNC_SIZE 30
+#define TRACE_FILE_SIZE 20
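+/*
+ * Ring buffer entry for a single likely()/unlikely() hit.  The function
+ * and file names are copied (and truncated) into the entry rather than
+ * referenced by pointer, so the record stays valid even if the annotation
+ * lives in a module that is later unloaded.
+ */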
+struct trace_unlikely {
+       struct trace_entry      ent;
+       unsigned                line;
+       char                    func[TRACE_FUNC_SIZE+1];
+       char                    file[TRACE_FILE_SIZE+1];
+       char                    correct;
+};
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
                          TRACE_MMIO_MAP);                              \
                IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
                IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
+               IF_ASSIGN(var, ent, struct trace_unlikely, TRACE_UNLIKELY); \
                IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
                __ftrace_bad_type();                                    \
        } while (0)
        TRACE_ITER_SCHED_TREE           = 0x200,
        TRACE_ITER_PRINTK               = 0x400,
        TRACE_ITER_PREEMPTONLY          = 0x800,
+#ifdef CONFIG_UNLIKELY_TRACER
+       TRACE_ITER_UNLIKELY             = 0x1000,
+#endif
 };
 
 /*
                preempt_enable_notrace();
 }
 
+#ifdef CONFIG_UNLIKELY_TRACER
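+/*
+ * Called when the current tracer is switched: likely/unlikely event
+ * tracing is only enabled if the "unlikely" trace option is set, but it
+ * is always disabled when the previous tracer is torn down.
+ */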
+extern int enable_unlikely_tracing(struct trace_array *tr);
+extern void disable_unlikely_tracing(void);
+static inline int trace_unlikely_enable(struct trace_array *tr)
+{
+       if (trace_flags & TRACE_ITER_UNLIKELY)
+               return enable_unlikely_tracing(tr);
+       return 0;
+}
+static inline void trace_unlikely_disable(void)
+{
+       /* due to races, always disable */
+       disable_unlikely_tracing();
+}
+#else
+static inline int trace_unlikely_enable(struct trace_array *tr)
+{
+       return 0;
+}
+static inline void trace_unlikely_disable(void)
+{
+}
+#endif /* CONFIG_UNLIKELY_TRACER */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
 
 #include <asm/local.h>
 #include "trace.h"
 
+#ifdef CONFIG_UNLIKELY_TRACER
+
+static int unlikely_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(unlikely_tracing_mutex);
+static struct trace_array *unlikely_tracer;
+
+static void
+probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+{
+       struct trace_array *tr = unlikely_tracer;
+       struct ring_buffer_event *event;
+       struct trace_unlikely *entry;
+       unsigned long flags, irq_flags;
+       int cpu, pc;
+       const char *p;
+
+       /*
+        * I would love to save just the ftrace_likely_data pointer, but
+        * this code can also be used by modules. Ugly things can happen
+        * if the module is unloaded, and then we go and read the
+        * pointer.  This is slower, but much safer.
+        */
+
+       if (unlikely(!tr))
+               return;
+
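+       /*
+        * Bump the per-cpu "disabled" counter; if it was already non-zero
+        * (tracing is off on this CPU or we are nesting), skip recording.
+        */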
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+               goto out;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               goto out;
+
+       pc = preempt_count();
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type         = TRACE_UNLIKELY;
+
+       /* Strip off the path, only save the file */
+       p = f->file + strlen(f->file);
+       while (p >= f->file && *p != '/')
+               p--;
+       p++;
+
+       strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+       strncpy(entry->file, p, TRACE_FILE_SIZE);
+       entry->func[TRACE_FUNC_SIZE] = 0;
+       entry->file[TRACE_FILE_SIZE] = 0;
+       entry->line = f->line;
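+       /* non-zero if the prediction in the annotation matched the outcome */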
+       entry->correct = val == expect;
+
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+ out:
+       atomic_dec(&tr->data[cpu]->disabled);
+       local_irq_restore(flags);
+}
+
+static inline
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+{
+       if (!unlikely_tracing_enabled)
+               return;
+
+       probe_likely_condition(f, val, expect);
+}
+
+int enable_unlikely_tracing(struct trace_array *tr)
+{
+       int ret = 0;
+
+       mutex_lock(&unlikely_tracing_mutex);
+       unlikely_tracer = tr;
+       /*
+        * The tracer pointer must be visible before tracing is
+        * enabled.  The reader only checks the enabled counter as a
+        * condition, so no matching rmb() is needed.
+        */
+       smp_wmb();
+       unlikely_tracing_enabled++;
+       mutex_unlock(&unlikely_tracing_mutex);
+
+       return ret;
+}
+
+void disable_unlikely_tracing(void)
+{
+       mutex_lock(&unlikely_tracing_mutex);
+
+       if (!unlikely_tracing_enabled)
+               goto out_unlock;
+
+       unlikely_tracing_enabled--;
+
+ out_unlock:
+       mutex_unlock(&unlikely_tracing_mutex);
+}
+#else
+static inline
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+{
+}
+#endif /* CONFIG_UNLIKELY_TRACER */
+
 void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
 {
+       /*
+        * I would love to have a trace point here instead, but the
+        * trace point code is so inundated with unlikely and likely
+        * conditions that the recursive nightmare that exists is too
+        * much to try to get working. At least for now.
+        */
+       trace_likely_condition(f, val, expect);
+
        /* FIXME: Make this atomic! */
        if (val == expect)
                f->correct++;