1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * Return non-zero when @entry looks like a well-formed trace entry.
 * (Body not visible in this excerpt; presumably switches on entry->type —
 * TODO confirm against the full file.)
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * trace_test_buffer_cpu - sanity-check one CPU's ring of trace pages.
 *
 * Walks every entry slot in @data's page list, verifying that each entry
 * written so far is a valid type, that the page list wraps exactly at
 * tr->entries slots, and that head_page() agrees with the first page.
 * NOTE(review): this excerpt is missing lines; the failure paths, return
 * statements and some loop bookkeeping are not visible here.
 */
22 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
24 struct trace_entry *entries;
/* an initialized per-cpu buffer must own at least one page */
29 BUG_ON(list_empty(&data->trace_pages));
30 page = list_entry(data->trace_pages.next, struct page, lru);
31 entries = page_address(page);
/* the buffer head must point at the first page's entry array */
34 if (head_page(data) != entries)
38 * The starting trace buffer always has valid elements,
39 * if any element exists.
41 entries = head_page(data);
43 for (i = 0; i < tr->entries; i++) {
/* only slots below trace_idx have been written; those must be valid */
45 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
46 printk(KERN_CONT ".. invalid entry %d ",
/* crossed a page boundary: step to the next page in the list */
52 if (idx >= ENTRIES_PER_PAGE) {
53 page = virt_to_page(entries);
54 if (page->lru.next == &data->trace_pages) {
/* list wrapped before all tr->entries slots were consumed */
55 if (i != tr->entries - 1) {
56 printk(KERN_CONT ".. entries buffer mismatch");
60 page = list_entry(page->lru.next, struct page, lru);
61 entries = page_address(page);
/* after the walk the current page must be the tail of the list */
67 page = virt_to_page(entries);
68 if (page->lru.next != &data->trace_pages) {
69 printk(KERN_CONT ".. too many entries");
78 printk(KERN_CONT ".. corrupted trace buffer .. ");
83 * Test the trace buffer to see if all the elements
/*
 * Validates every CPU's buffer via trace_test_buffer_cpu() and sums the
 * per-cpu entry counts (returned through @count when the caller passes a
 * non-NULL pointer — not all lines of this function are visible here).
 */
86 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
88 unsigned long flags, cnt = 0;
/* Don't allow flipping of max traces now */
92 raw_local_irq_save(flags);
93 __raw_spin_lock(&ftrace_max_lock);
94 for_each_possible_cpu(cpu) {
/* skip CPUs that never recorded anything */
95 if (!head_page(tr->data[cpu]))
/* accumulate total number of recorded entries across CPUs */
98 cnt += tr->data[cpu]->trace_idx;
100 ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
/* release the max-trace lock before re-enabling interrupts */
104 __raw_spin_unlock(&ftrace_max_lock);
105 raw_local_irq_restore(flags);
115 #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Two-level stringify so macro arguments are expanded before being
 * turned into a string.  (The companion __STR(x) definition is not
 * visible in this excerpt — presumably #x; TODO confirm.)
 */
118 #define STR(x) __STR(x)
120 /* Test dynamic code modification and ftrace filters */
/*
 * Runs after the basic function-tracer test passed: enables a filter on
 * a single known function, verifies nothing else is traced, then calls
 * the function and verifies exactly one entry lands in the buffer.
 * NOTE(review): excerpt is incomplete; error-path returns and some setup
 * calls are not visible here.
 */
121 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
122 struct trace_array *tr,
/* remember global enable state so it can be restored before returning */
125 int save_ftrace_enabled = ftrace_enabled;
126 int save_tracer_enabled = tracer_enabled;
/* The ftrace test PASSED */
132 printk(KERN_CONT "PASSED\n");
133 pr_info("Testing dynamic ftrace: ");
/* enable tracing, and record the filter function */
/* passed in by parameter to fool gcc from optimizing */
/* update the records */
143 ret = ftrace_force_update();
145 printk(KERN_CONT ".. ftraced failed .. ");
150 * Some archs *cough*PowerPC*cough* add characters to the
151 * start of the function names. We simply put a '*' to
154 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
/* filter only on our function */
157 ftrace_set_filter(func_name, strlen(func_name), 1);
/* Sleep for a 1/10 of a second */
/* we should have nothing in the buffer */
167 ret = trace_test_buffer(tr, &count);
173 printk(KERN_CONT ".. filter did not filter .. ");
/* call our function again */
/* stop the tracing. */
185 trace->ctrl_update(tr);
/* check the trace buffer */
189 ret = trace_test_buffer(tr, &count);
/* we should only have one item */
193 if (!ret && count != 1) {
194 printk(KERN_CONT ".. filter failed count=%ld ..", count);
/* restore the state saved on entry */
199 ftrace_enabled = save_ftrace_enabled;
200 tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
203 ftrace_set_filter(NULL, 0, 1);
/* dynamic ftrace not configured: the selftest trivially succeeds */
208 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
209 #endif /* CONFIG_DYNAMIC_FTRACE */
211 * Simple verification test of ftrace function tracer.
212 * Enable ftrace, sleep 1/10 second, and then read the trace
213 * buffer to see if all is in order.
/*
 * On success, chains into the dynamic-tracing test (a no-op stub when
 * CONFIG_DYNAMIC_FTRACE is off).  NOTE(review): excerpt is incomplete;
 * the start-tracing and return statements are not all visible.
 */
216 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* remember global enable state so it can be restored before returning */
218 int save_ftrace_enabled = ftrace_enabled;
219 int save_tracer_enabled = tracer_enabled;
/* make sure msleep has been recorded */
/* force the recorded functions to be traced */
227 ret = ftrace_force_update();
229 printk(KERN_CONT ".. ftraced failed .. ");
/* start the tracing */
/* Sleep for a 1/10 of a second */
/* stop the tracing. */
243 trace->ctrl_update(tr);
/* check the trace buffer */
247 ret = trace_test_buffer(tr, &count);
/* tracing for 100ms must have produced at least one entry */
250 if (!ret && !count) {
251 printk(KERN_CONT ".. no entries found ..");
256 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
257 DYN_FTRACE_TEST_NAME);
/* restore the state saved on entry */
260 ftrace_enabled = save_ftrace_enabled;
261 tracer_enabled = save_tracer_enabled;
/* kill ftrace totally if we failed */
269 #endif /* CONFIG_FTRACE */
271 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Selftest for the irqsoff tracer: disable interrupts for a while and
 * verify that the max-latency buffer (max_tr) captured entries.
 * NOTE(review): excerpt is incomplete; start/irq-disable calls are not
 * all visible.
 */
273 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
/* keep the pre-test max latency so the test does not clobber it */
275 unsigned long save_max = tracing_max_latency;
/* start the tracing */
/* reset the max latency */
283 tracing_max_latency = 0;
/* disable interrupts for a bit */
/* stop the tracing. */
290 trace->ctrl_update(tr);
/* check both trace buffers */
292 ret = trace_test_buffer(tr, NULL);
294 ret = trace_test_buffer(&max_tr, &count);
/* the irqs-off section must have left something in max_tr */
297 if (!ret && !count) {
298 printk(KERN_CONT ".. no entries found ..");
/* restore the caller-visible max latency */
302 tracing_max_latency = save_max;
306 #endif /* CONFIG_IRQSOFF_TRACER */
308 #ifdef CONFIG_PREEMPT_TRACER
/*
 * Selftest for the preemptoff tracer: disable preemption for a while and
 * verify that the max-latency buffer (max_tr) captured entries.
 * Mirrors trace_selftest_startup_irqsoff().  NOTE(review): excerpt is
 * incomplete; start/preempt-disable calls are not all visible.
 */
310 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
/* keep the pre-test max latency so the test does not clobber it */
312 unsigned long save_max = tracing_max_latency;
/* start the tracing */
/* reset the max latency */
320 tracing_max_latency = 0;
/* disable preemption for a bit */
/* stop the tracing. */
327 trace->ctrl_update(tr);
/* check both trace buffers */
329 ret = trace_test_buffer(tr, NULL);
331 ret = trace_test_buffer(&max_tr, &count);
/* the preempt-off section must have left something in max_tr */
334 if (!ret && !count) {
335 printk(KERN_CONT ".. no entries found ..");
/* restore the caller-visible max latency */
339 tracing_max_latency = save_max;
343 #endif /* CONFIG_PREEMPT_TRACER */
345 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Combined preempt+irqs-off tracer selftest.  Runs the critical section
 * twice — once entering via preempt-disable and once entering via
 * irq-disable — and verifies max_tr captured entries both times.
 * NOTE(review): excerpt is incomplete; the actual disable/enable calls
 * and error paths are not all visible.
 */
347 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
/* keep the pre-test max latency so the test does not clobber it */
349 unsigned long save_max = tracing_max_latency;
/* start the tracing */
/* reset the max latency */
358 tracing_max_latency = 0;
/* first pass: disable preemption and interrupts for a bit */
360 /* disable preemption and interrupts for a bit */
365 /* reverse the order of preempt vs irqs */
368 /* stop the tracing. */
370 trace->ctrl_update(tr);
371 /* check both trace buffers */
372 ret = trace_test_buffer(tr, NULL);
376 ret = trace_test_buffer(&max_tr, &count);
380 if (!ret && !count) {
381 printk(KERN_CONT ".. no entries found ..");
/* second pass, opposite nesting order */
386 /* do the test by disabling interrupts first this time */
387 tracing_max_latency = 0;
389 trace->ctrl_update(tr);
394 /* reverse the order of preempt vs irqs */
397 /* stop the tracing. */
399 trace->ctrl_update(tr);
400 /* check both trace buffers */
401 ret = trace_test_buffer(tr, NULL);
405 ret = trace_test_buffer(&max_tr, &count);
407 if (!ret && !count) {
408 printk(KERN_CONT ".. no entries found ..");
/* restore the caller-visible max latency */
415 tracing_max_latency = save_max;
419 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
421 #ifdef CONFIG_NOP_TRACER
/* Selftest for the nop tracer — it records nothing, so there is nothing
 * to verify (return statement not visible in this excerpt). */
423 trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
425 /* What could possibly go wrong? */
430 #ifdef CONFIG_SCHED_TRACER
/*
 * Kernel thread used by the wakeup-tracer selftest: promotes itself to a
 * real-time priority, signals readiness through the completion passed in
 * @data, sleeps until the test wakes it, then waits to be stopped.
 * NOTE(review): excerpt is incomplete; the complete()/schedule() calls
 * and the sleep loop body are not visible here.
 */
431 static int trace_wakeup_test_thread(void *data)
/* Make this a RT thread, doesn't need to be too high */
434 struct sched_param param = { .sched_priority = 5 };
435 struct completion *x = data;
/* FIX: "&param;" had been mangled into the mojibake "¶m;"
 * (HTML entity &para; mis-decoded) — restore the address-of expression */
437 sched_setscheduler(current, SCHED_FIFO, &param);
/* Make it know we have a new prio */
/* now go to sleep and let the test wake us up */
443 set_current_state(TASK_INTERRUPTIBLE);
/* we are awake, now wait to disappear */
447 while (!kthread_should_stop()) {
449 * This is an RT task, do short sleeps to let
/*
 * Selftest for the wakeup tracer: spawn an RT kthread, let it sleep,
 * wake it while tracing, and verify max_tr recorded the wakeup latency.
 * NOTE(review): excerpt is incomplete; the wake_up_process() call and
 * several error paths are not visible here.
 */
459 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* keep the pre-test max latency so the test does not clobber it */
461 unsigned long save_max = tracing_max_latency;
462 struct task_struct *p;
/* completed by the kthread once it runs at RT priority */
463 struct completion isrt;
467 init_completion(&isrt);
469 /* create a high prio thread */
470 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
472 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
476 /* make sure the thread is running at an RT prio */
477 wait_for_completion(&isrt);
479 /* start the tracing */
482 /* reset the max latency */
483 tracing_max_latency = 0;
485 /* sleep to let the RT thread sleep too */
489 * Yes this is slightly racy. It is possible that for some
490 * strange reason that the RT thread we created, did not
491 * call schedule for 100ms after doing the completion,
492 * and we do a wakeup on a task that already is awake.
493 * But that is extremely unlikely, and the worst thing that
494 * happens in such a case, is that we disable tracing.
495 * Honestly, if this race does happen something is horribly
496 * wrong with the system.
501 /* give a little time to let the thread wake up */
504 /* stop the tracing. */
506 trace->ctrl_update(tr);
507 /* check both trace buffers */
508 ret = trace_test_buffer(tr, NULL);
510 ret = trace_test_buffer(&max_tr, &count);
/* restore the caller-visible max latency */
515 tracing_max_latency = save_max;
517 /* kill the thread */
/* the wakeup must have left something in max_tr */
520 if (!ret && !count) {
521 printk(KERN_CONT ".. no entries found ..");
527 #endif /* CONFIG_SCHED_TRACER */
529 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Selftest for the sched_switch tracer: trace for 100ms (the msleep
 * guarantees at least one context switch) and verify entries exist.
 * NOTE(review): excerpt is incomplete; start/return statements are not
 * all visible.
 */
531 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
536 /* start the tracing */
539 /* Sleep for a 1/10 of a second */
541 /* stop the tracing. */
543 trace->ctrl_update(tr);
544 /* check the trace buffer */
545 ret = trace_test_buffer(tr, &count);
/* sleeping must have recorded at least one switch */
548 if (!ret && !count) {
549 printk(KERN_CONT ".. no entries found ..");
555 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
557 #ifdef CONFIG_SYSPROF_TRACER
/*
 * Selftest for the sysprof tracer: trace for 100ms, then validate the
 * buffer contents.  Unlike the other tests, no entry-count check is
 * visible in this excerpt — only buffer validity is verified here.
 */
559 trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
564 /* start the tracing */
567 /* Sleep for a 1/10 of a second */
569 /* stop the tracing. */
571 trace->ctrl_update(tr);
572 /* check the trace buffer */
573 ret = trace_test_buffer(tr, &count);
578 #endif /* CONFIG_SYSPROF_TRACER */