RB_LEN_TIME_STAMP = 16,
};
- ----/* inline for ring buffer fast paths */
+ ++++static inline int rb_null_event(struct ring_buffer_event *event)
+ ++++{
+ ++++ return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
+ ++++}
+ ++++
+ ++++static inline int rb_discarded_event(struct ring_buffer_event *event)
+ ++++{
+ ++++ return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
+ ++++}
+ ++++
+ ++++static void rb_event_set_padding(struct ring_buffer_event *event)
+ ++++{
+ ++++ event->type = RINGBUF_TYPE_PADDING;
+ ++++ event->time_delta = 0;
+ ++++}
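+ ++++
+ ++++/*
+ ++++ * Padding events are told apart by time_delta: zero means the "null"
+ ++++ * padding that fills out the unused end of a page, non-zero means a
+ ++++ * discarded event whose length must still be skipped over when
+ ++++ * walking the page.
+ ++++ */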
+ ++++
+ ++++/**
+ ++++ * ring_buffer_event_discard - discard an event in the ring buffer
+ ++++ * @event: the event to discard
+ ++++ *
+ ++++ * Sometimes an event that is in the ring buffer needs to be ignored.
+ ++++ * This function lets the user discard such an event so that it will
+ ++++ * not be read later.
+ ++++ *
+ ++++ * Note, it is up to the user to be careful with this, and protect
+ ++++ * against races. If the user discards an event that has already been
+ ++++ * consumed, it could corrupt the ring buffer.
+ ++++ */
+ ++++void ring_buffer_event_discard(struct ring_buffer_event *event)
+ ++++{
+ ++++ event->type = RINGBUF_TYPE_PADDING;
+ ++++	/* time delta must be non-zero so this reads as a discarded event */
+ ++++ if (!event->time_delta)
+ ++++ event->time_delta = 1;
+ ++++}
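+ ++++
+ ++++/*
+ ++++ * A minimal usage sketch (hypothetical caller, not part of this
+ ++++ * patch): a writer that reserves an event and then decides the data
+ ++++ * is not worth keeping can discard it before committing:
+ ++++ *
+ ++++ *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
+ ++++ *	if (event) {
+ ++++ *		entry = ring_buffer_event_data(event);
+ ++++ *		if (!fill_entry(entry))
+ ++++ *			ring_buffer_event_discard(event);
+ ++++ *		ring_buffer_unlock_commit(buffer, event);
+ ++++ *	}
+ ++++ *
+ ++++ * fill_entry() is an imaginary helper; the commit must still happen
+ ++++ * so the reserved space is accounted for.
+ ++++ */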
+ ++++
static unsigned
- ----rb_event_length(struct ring_buffer_event *event)
+ ++++rb_event_data_length(struct ring_buffer_event *event)
{
unsigned length;
+ ++++ if (event->len)
+ ++++ length = event->len * RB_ALIGNMENT;
+ ++++ else
+ ++++ length = event->array[0];
+ ++++ return length + RB_EVNT_HDR_SIZE;
+ ++++}
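+ ++++
+ ++++/*
+ ++++ * The length encoding above: small events store their size directly
+ ++++ * in the len field, in RB_ALIGNMENT (4 byte) units; events too large
+ ++++ * for that set len to 0 and carry the byte count in array[0].
+ ++++ */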
+ ++++
+ ++++/* inline for ring buffer fast paths */
+ ++++static unsigned
+ ++++rb_event_length(struct ring_buffer_event *event)
+ ++++{
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- ---- /* undefined */
- ---- return -1;
+ ++++ if (rb_null_event(event))
+ ++++ /* undefined */
+ ++++ return -1;
+ ++++ return rb_event_data_length(event);
case RINGBUF_TYPE_TIME_EXTEND:
return RB_LEN_TIME_EXTEND;
	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;
case RINGBUF_TYPE_DATA:
- ---- if (event->len)
- ---- length = event->len * RB_ALIGNMENT;
- ---- else
- ---- length = event->array[0];
- ---- return length + RB_EVNT_HDR_SIZE;
+ ++++ return rb_event_data_length(event);
default:
BUG();
}
extern int ring_buffer_page_too_big(void);
#ifdef CONFIG_HOTPLUG_CPU
- - static int __cpuinit rb_cpu_notify(struct notifier_block *self,
- - unsigned long action, void *hcpu);
+ + static int rb_cpu_notify(struct notifier_block *self,
+ + unsigned long action, void *hcpu);
#endif
/**
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
- ----static inline int rb_null_event(struct ring_buffer_event *event)
- ----{
- ---- return event->type == RINGBUF_TYPE_PADDING;
- ----}
- ----
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
if (tail < BUF_PAGE_SIZE) {
/* Mark the rest of the page with padding */
event = __rb_page_index(tail_page, tail);
- ---- event->type = RINGBUF_TYPE_PADDING;
+ ++++ rb_event_set_padding(event);
}
if (tail <= BUF_PAGE_SIZE)
event = rb_reader_event(cpu_buffer);
- ---- if (event->type == RINGBUF_TYPE_DATA)
+ ++++ if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
cpu_buffer->entries--;
rb_update_read_stamp(cpu_buffer, event);
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- ---- RB_WARN_ON(cpu_buffer, 1);
+ ++++ if (rb_null_event(event))
+ ++++ RB_WARN_ON(cpu_buffer, 1);
+ ++++	/*
+ ++++	 * Because the writer could be discarding every event it
+ ++++	 * creates (which would probably be bad), going back to
+ ++++	 * "again" here might mean we never catch up, trigger the
+ ++++	 * warn-on, or lock up the box. Return the padding instead;
+ ++++	 * the caller will release the current locks and try again.
+ ++++	 */
rb_advance_reader(cpu_buffer);
- ---- return NULL;
+ ++++ return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- ---- rb_inc_iter(iter);
- ---- goto again;
+ ++++ if (rb_null_event(event)) {
+ ++++ rb_inc_iter(iter);
+ ++++ goto again;
+ ++++ }
+ ++++ rb_advance_iter(iter);
+ ++++ return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
+ ++++ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
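+ ++++	/*
+ ++++	 * rb_buffer_peek() may now return a discarded-padding event;
+ ++++	 * drop the lock, relax, and retry until real data (or nothing)
+ ++++	 * shows up.
+ ++++	 */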
+ ++++ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ ++++ cpu_relax();
+ ++++ goto again;
+ ++++ }
+ ++++
return event;
}
struct ring_buffer_event *event;
unsigned long flags;
+ ++++ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ ++++ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ ++++ cpu_relax();
+ ++++ goto again;
+ ++++ }
+ ++++
return event;
}
struct ring_buffer_event *event = NULL;
unsigned long flags;
+ ++++ again:
/* might be called in atomic */
preempt_disable();
out:
preempt_enable();
+ ++++ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ ++++ cpu_relax();
+ ++++ goto again;
+ ++++ }
+ ++++
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
+ ++++ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
if (!event)
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ ++++ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ ++++ cpu_relax();
+ ++++ goto again;
+ ++++ }
+ ++++
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
fs_initcall(rb_init_debugfs);
#ifdef CONFIG_HOTPLUG_CPU
- - static int __cpuinit rb_cpu_notify(struct notifier_block *self,
- - unsigned long action, void *hcpu)
+ + static int rb_cpu_notify(struct notifier_block *self,
+ + unsigned long action, void *hcpu)
{
struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-- --- TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;
++ +++ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
/**
* trace_wake_up - wake up tasks waiting for trace input
"context-info",
"latency-format",
"global-clock",
++ +++ "sleep-time",
NULL
};
return cnt;
}
-- ---ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
++ +++static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
void *ret;
static void ftrace_trace_userstack(struct trace_array *tr,
unsigned long flags, int pc);
- ----void trace_buffer_unlock_commit(struct trace_array *tr,
- ---- struct ring_buffer_event *event,
- ---- unsigned long flags, int pc)
+ ++++static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+ ++++ struct ring_buffer_event *event,
+ ++++ unsigned long flags, int pc,
+ ++++ int wake)
{
ring_buffer_unlock_commit(tr->buffer, event);
ftrace_trace_stack(tr, flags, 6, pc);
ftrace_trace_userstack(tr, flags, pc);
- ---- trace_wake_up();
+ ++++
+ ++++ if (wake)
+ ++++ trace_wake_up();
+ ++++}
+ ++++
+ ++++void trace_buffer_unlock_commit(struct trace_array *tr,
+ ++++ struct ring_buffer_event *event,
+ ++++ unsigned long flags, int pc)
+ ++++{
+ ++++ __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
}
struct ring_buffer_event *
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- ---- return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+ ++++ return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+ ++++}
+ ++++
+ ++++void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ ++++ unsigned long flags, int pc)
+ ++++{
+ ++++ return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
}
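+ ++++
+ ++++/*
+ ++++ * The nowake variant exists for call sites where trace_wake_up()
+ ++++ * itself would be unsafe, presumably because the event is recorded
+ ++++ * from the wakeup path and waking the reader there could recurse or
+ ++++ * deadlock.
+ ++++ */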
void
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ----static void __trace_graph_entry(struct trace_array *tr,
+ ++++static int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
struct ftrace_graph_ent_entry *entry;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- ---- return;
+ ++++ return 0;
event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc);
if (!event)
- ---- return;
+ ++++ return 0;
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event);
+ ++++
+ ++++ return 1;
}
static void __trace_graph_return(struct trace_array *tr,
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
+ ++++ int ret;
int cpu;
int pc;
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- ---- __trace_graph_entry(tr, trace, flags, pc);
+ ++++ ret = __trace_graph_entry(tr, trace, flags, pc);
+ ++++ } else {
+ ++++ ret = 0;
}
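+ ++++
+ ++++	/*
+ ++++	 * Propagating ret keeps entry/return records balanced: when the
+ ++++	 * entry event could not be written, returning 0 lets the graph
+ ++++	 * infrastructure skip hooking this function's return as well.
+ ++++	 */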
/* Only do the atomic if it is not already set */
if (!test_tsk_trace_graph(current))
set_tsk_trace_graph(current);
+ ++++
atomic_dec(&data->disabled);
local_irq_restore(flags);
- ---- return 1;
+ ++++ return ret;
}
void trace_graph_return(struct ftrace_graph_ret *trace)
* trace_vbprintk - write binary msg to tracing buffer
*
*/
- int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
static raw_spinlock_t trace_buf_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
goto out_unlock;
entry = ring_buffer_event_data(event);
entry->ip = ip;
- entry->depth = depth;
entry->fmt = fmt;
memcpy(entry->buf, trace_buf, sizeof(u32) * len);
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
- int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
goto out_unlock;
entry = ring_buffer_event_data(event);
entry->ip = ip;
- entry->depth = depth;
memcpy(&entry->buf, trace_buf, len);
entry->buf[len] = 0;
return TRACE_TYPE_HANDLED;
}
- static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
- {
- struct trace_seq *s = &iter->seq;
- struct trace_entry *entry = iter->ent;
- struct bprint_entry *field;
- int ret;
-
- trace_assign_type(field, entry);
-
- ret = trace_seq_bprintf(s, field->fmt, field->buf);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- return TRACE_TYPE_HANDLED;
- }
-
- static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
- {
- struct trace_seq *s = &iter->seq;
- struct trace_entry *entry = iter->ent;
- struct print_entry *field;
- int ret;
-
- trace_assign_type(field, entry);
-
- ret = trace_seq_printf(s, "%s", field->buf);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- return TRACE_TYPE_HANDLED;
- }
-
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
if (iter->ent->type == TRACE_BPRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
- return print_bprintk_msg_only(iter);
+ return trace_print_bprintk_msg_only(iter);
if (iter->ent->type == TRACE_PRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
- return print_printk_msg_only(iter);
+ return trace_print_printk_msg_only(iter);
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
- struct trace_iterator *iter = m->private;
+ struct trace_iterator *iter;
int cpu;
+ if (!(file->f_mode & FMODE_READ))
+ return 0;
+
+ iter = m->private;
+
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
struct trace_iterator *iter;
int ret = 0;
- iter = __tracing_open(inode, file);
- if (IS_ERR(iter))
- ret = PTR_ERR(iter);
- else if (trace_flags & TRACE_ITER_LATENCY_FMT)
- iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ 	/* If this file was opened for write, then erase the contents */
+ if ((file->f_mode & FMODE_WRITE) &&
+ !(file->f_flags & O_APPEND)) {
+ long cpu = (long) inode->i_private;
+ if (cpu == TRACE_PIPE_ALL_CPU)
+ tracing_reset_online_cpus(&global_trace);
+ else
+ tracing_reset(&global_trace, cpu);
+ }
+
+ if (file->f_mode & FMODE_READ) {
+ iter = __tracing_open(inode, file);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ }
return ret;
}
return ret;
}
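+ 
+ /*
+  * Writing to the trace file clears the buffer as a side effect of
+  * tracing_open() above (any open for write without O_APPEND resets
+  * the buffers), so the write method itself only needs to swallow the
+  * data and report success.
+  */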
+ static ssize_t
+ tracing_write_stub(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+ {
+ return count;
+ }
+
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
+ .write = tracing_write_stub,
.llseek = seq_lseek,
.release = tracing_release,
};
int ret;
va_list args;
va_start(args, fmt);
- ret = trace_vprintk(0, -1, fmt, args);
+ ret = trace_vprintk(0, fmt, args);
va_end(args);
return ret;
}
if (d_tracer)
return d_tracer;
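++ +++	/* debugfs may not be ready yet if we are called very early in boot */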
++ +++ if (!debugfs_initialized())
++ +++ return NULL;
++ +++
d_tracer = debugfs_create_dir("tracing", NULL);
if (!d_tracer && !once) {
pr_warning("Could not create debugfs 'trace_pipe' entry\n");
/* per cpu trace */
- entry = debugfs_create_file("trace", 0444, d_cpu,
+ entry = debugfs_create_file("trace", 0644, d_cpu,
(void *) cpu, &tracing_fops);
if (!entry)
pr_warning("Could not create debugfs 'trace' entry\n");
if (!entry)
pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
- entry = debugfs_create_file("trace", 0444, d_tracer,
+ entry = debugfs_create_file("trace", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
if (!entry)
pr_warning("Could not create debugfs 'trace' entry\n");
trace_seq_init(s);
}
- void ftrace_dump(void)
+ static void __ftrace_dump(bool disable_tracing)
{
static DEFINE_SPINLOCK(ftrace_dump_lock);
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ unsigned int old_userobj;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
dump_ran = 1;
- /* No turning back! */
tracing_off();
- ftrace_kill();
+
+ if (disable_tracing)
+ ftrace_kill();
for_each_tracing_cpu(cpu) {
atomic_inc(&global_trace.data[cpu]->disabled);
}
+ old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
/* don't look at user memory in panic mode */
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
else
printk(KERN_TRACE "---------------------------------\n");
+ /* Re-enable tracing if requested */
+ if (!disable_tracing) {
+ trace_flags |= old_userobj;
+
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ }
+ tracing_on();
+ }
+
out:
spin_unlock_irqrestore(&ftrace_dump_lock, flags);
}
+ /* By default: disable tracing after the dump */
+ void ftrace_dump(void)
+ {
+ __ftrace_dump(true);
+ }
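+ 
+ /*
+  * Passing false instead would re-enable tracing once the dump
+  * completes; a hypothetical caller that only wants a snapshot of the
+  * buffers (a self-test, say) could use __ftrace_dump(false).
+  */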
+
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
struct bprint_entry {
struct trace_entry ent;
unsigned long ip;
- int depth;
const char *fmt;
u32 buf[];
};
struct print_entry {
struct trace_entry ent;
unsigned long ip;
- int depth;
char buf[];
};
unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
unsigned long flags, int pc);
+ ++++void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ ++++ unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
- trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+ trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
- trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;
TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
TRACE_ITER_LATENCY_FMT = 0x40000,
TRACE_ITER_GLOBAL_CLK = 0x80000,
++ +++ TRACE_ITER_SLEEP_TIME = 0x100000,
};
/*
TRACE_EVENT_TYPE_RAW = 2,
};
+ ++++struct ftrace_event_field {
+ ++++ struct list_head link;
+ ++++ char *name;
+ ++++ char *type;
+ ++++ int offset;
+ ++++ int size;
+ ++++};
+ ++++
struct ftrace_event_call {
- ---- char *name;
- ---- char *system;
- ---- struct dentry *dir;
- ---- int enabled;
- ---- int (*regfunc)(void);
- ---- void (*unregfunc)(void);
- ---- int id;
- ---- int (*raw_init)(void);
- ---- int (*show_format)(struct trace_seq *s);
+ ++++ char *name;
+ ++++ char *system;
+ ++++ struct dentry *dir;
+ ++++ int enabled;
+ ++++ int (*regfunc)(void);
+ ++++ void (*unregfunc)(void);
+ ++++ int id;
+ ++++ int (*raw_init)(void);
+ ++++ int (*show_format)(struct trace_seq *s);
+ ++++ int (*define_fields)(void);
+ ++++ struct list_head fields;
+ ++++ struct filter_pred **preds;
+
+ #ifdef CONFIG_EVENT_PROFILE
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
+ #endif
};
+ ++++struct event_subsystem {
+ ++++ struct list_head list;
+ ++++ const char *name;
+ ++++ struct dentry *entry;
+ ++++ struct filter_pred **preds;
+ ++++};
+ ++++
+ ++++#define events_for_each(event) \
+ ++++ for (event = __start_ftrace_events; \
+ ++++ (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+ ++++ event++)
+ ++++
+ ++++#define MAX_FILTER_PRED 8
+ ++++
+ ++++struct filter_pred;
+ ++++
+ ++++typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
+ ++++
+ ++++struct filter_pred {
+ ++++ filter_pred_fn_t fn;
+ ++++ u64 val;
+ ++++ char *str_val;
+ ++++ int str_len;
+ ++++ char *field_name;
+ ++++ int offset;
+ ++++ int not;
+ ++++ int or;
+ ++++ int compound;
+ ++++ int clear;
+ ++++};
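+ ++++
+ ++++/*
+ ++++ * Illustrative only: a filter string such as "common_pid == 42"
+ ++++ * would be parsed into a filter_pred whose field_name is
+ ++++ * "common_pid", val is 42, and fn points at a compare helper chosen
+ ++++ * for the field's size; filter_match_preds() then runs every
+ ++++ * predicate in call->preds against the raw binary record.
+ ++++ */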
+ ++++
+ ++++int trace_define_field(struct ftrace_event_call *call, char *type,
+ ++++ char *name, int offset, int size);
+ ++++extern void filter_free_pred(struct filter_pred *pred);
+ ++++extern void filter_print_preds(struct filter_pred **preds,
+ ++++ struct trace_seq *s);
+ ++++extern int filter_parse(char **pbuf, struct filter_pred *pred);
+ ++++extern int filter_add_pred(struct ftrace_event_call *call,
+ ++++ struct filter_pred *pred);
+ ++++extern void filter_free_preds(struct ftrace_event_call *call);
+ ++++extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+ ++++extern void filter_free_subsystem_preds(struct event_subsystem *system);
+ ++++extern int filter_add_subsystem_pred(struct event_subsystem *system,
+ ++++ struct filter_pred *pred);
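+ ++++
+ ++++/*
+ ++++ * Sketch of a define_fields callback (the event struct and field
+ ++++ * are chosen for illustration): each field registers its type,
+ ++++ * name, offset and size so predicates can find it in the record:
+ ++++ *
+ ++++ *	trace_define_field(call, "pid_t", "next_pid",
+ ++++ *			   offsetof(struct ctx_switch_entry, next_pid),
+ ++++ *			   sizeof(pid_t));
+ ++++ */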
+ ++++
void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
+ #define for_each_event(event) \
+ for (event = __start_ftrace_events; \
+ (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+ event++)
+
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];