1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h>        /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
17 #include <linux/fs.h>
18
19 #include "trace.h"
20
21 /* Global flag to disable all recording to ring buffers */
22 static int ring_buffers_off __read_mostly;
23
24 /**
25  * tracing_on - enable all tracing buffers
26  *
27  * This function enables all tracing buffers that may have been
28  * disabled with tracing_off.
29  */
30 void tracing_on(void)
31 {
32         ring_buffers_off = 0;
33 }
34 EXPORT_SYMBOL_GPL(tracing_on);
35
36 /**
37  * tracing_off - turn off all tracing buffers
38  *
39  * This function stops all tracing buffers from recording data.
40  * It does not disable any overhead the tracers themselves may
41  * be causing. This function simply causes all recording to
42  * the ring buffers to fail.
43  */
44 void tracing_off(void)
45 {
46         ring_buffers_off = 1;
47 }
48 EXPORT_SYMBOL_GPL(tracing_off);
49
50 /* Up this if you want to test the TIME_EXTENTS and normalization */
51 #define DEBUG_SHIFT 0
52
53 /* FIXME!!! */
54 u64 ring_buffer_time_stamp(int cpu)
55 {
56         u64 time;
57
58         preempt_disable_notrace();
59         /* shift to debug/test normalization and TIME_EXTENTS */
60         time = sched_clock() << DEBUG_SHIFT;
61         preempt_enable_notrace();
62
63         return time;
64 }
65 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
66
67 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
68 {
69         /* Just stupid testing the normalize function and deltas */
70         *ts >>= DEBUG_SHIFT;
71 }
72 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
73
74 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
75 #define RB_ALIGNMENT_SHIFT      2
76 #define RB_ALIGNMENT            (1 << RB_ALIGNMENT_SHIFT)
77 #define RB_MAX_SMALL_DATA       28
78
79 enum {
80         RB_LEN_TIME_EXTEND = 8,
81         RB_LEN_TIME_STAMP = 16,
82 };
83
84 /* inline for ring buffer fast paths */
85 static inline unsigned
86 rb_event_length(struct ring_buffer_event *event)
87 {
88         unsigned length;
89
90         switch (event->type) {
91         case RINGBUF_TYPE_PADDING:
92                 /* undefined */
93                 return -1;
94
95         case RINGBUF_TYPE_TIME_EXTEND:
96                 return RB_LEN_TIME_EXTEND;
97
98         case RINGBUF_TYPE_TIME_STAMP:
99                 return RB_LEN_TIME_STAMP;
100
101         case RINGBUF_TYPE_DATA:
102                 if (event->len)
103                         length = event->len << RB_ALIGNMENT_SHIFT;
104                 else
105                         length = event->array[0];
106                 return length + RB_EVNT_HDR_SIZE;
107         default:
108                 BUG();
109         }
110         /* not hit */
111         return 0;
112 }
113
114 /**
115  * ring_buffer_event_length - return the length of the event
116  * @event: the event to get the length of
117  */
118 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
119 {
120         unsigned length = rb_event_length(event);
121         if (event->type != RINGBUF_TYPE_DATA)
122                 return length;
123         length -= RB_EVNT_HDR_SIZE;
124         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
125                 length -= sizeof(event->array[0]);
126         return length;
127 }
128 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
129
130 /* inline for ring buffer fast paths */
131 static inline void *
132 rb_event_data(struct ring_buffer_event *event)
133 {
134         BUG_ON(event->type != RINGBUF_TYPE_DATA);
135         /* If length is in len field, then array[0] has the data */
136         if (event->len)
137                 return (void *)&event->array[0];
138         /* Otherwise length is in array[0] and array[1] has the data */
139         return (void *)&event->array[1];
140 }
141
142 /**
143  * ring_buffer_event_data - return the data of the event
144  * @event: the event to get the data from
145  */
146 void *ring_buffer_event_data(struct ring_buffer_event *event)
147 {
148         return rb_event_data(event);
149 }
150 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
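/*
 * Illustrative sketch (caller code, not part of this file): copying the
 * payload out of an event that a read-side helper has already handed to
 * the caller.  "event" and "dest" are assumed to be valid here.
 *
 *	void *body = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 *
 *	memcpy(dest, body, len);
 */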
151
152 #define for_each_buffer_cpu(buffer, cpu)                \
153         for_each_cpu_mask(cpu, buffer->cpumask)
154
155 #define TS_SHIFT        27
156 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
157 #define TS_DELTA_TEST   (~TS_MASK)
158
159 /*
160  * This hack stolen from mm/slob.c.
161  * We can store per page timing information in the page frame of the page.
162  * Thanks to Peter Zijlstra for suggesting this idea.
163  */
164 struct buffer_page {
165         u64              time_stamp;    /* page time stamp */
166         local_t          write;         /* index for next write */
167         local_t          commit;        /* write committed index */
168         unsigned         read;          /* index for next read */
169         struct list_head list;          /* list of free pages */
170         void *page;                     /* Actual data page */
171 };
172
173 /*
174  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
175  * this issue out.
176  */
177 static inline void free_buffer_page(struct buffer_page *bpage)
178 {
179         if (bpage->page)
180                 free_page((unsigned long)bpage->page);
181         kfree(bpage);
182 }
183
184 /*
185  * We need to fit the time_stamp delta into 27 bits.
186  */
187 static inline int test_time_stamp(u64 delta)
188 {
189         if (delta & TS_DELTA_TEST)
190                 return 1;
191         return 0;
192 }
193
194 #define BUF_PAGE_SIZE PAGE_SIZE
195
196 /*
197  * head_page == tail_page && head == tail then buffer is empty.
198  */
199 struct ring_buffer_per_cpu {
200         int                             cpu;
201         struct ring_buffer              *buffer;
202         spinlock_t                      lock;
203         struct lock_class_key           lock_key;
204         struct list_head                pages;
205         struct buffer_page              *head_page;     /* read from head */
206         struct buffer_page              *tail_page;     /* write to tail */
207         struct buffer_page              *commit_page;   /* committed pages */
208         struct buffer_page              *reader_page;
209         unsigned long                   overrun;
210         unsigned long                   entries;
211         u64                             write_stamp;
212         u64                             read_stamp;
213         atomic_t                        record_disabled;
214 };
215
216 struct ring_buffer {
217         unsigned long                   size;
218         unsigned                        pages;
219         unsigned                        flags;
220         int                             cpus;
221         cpumask_t                       cpumask;
222         atomic_t                        record_disabled;
223
224         struct mutex                    mutex;
225
226         struct ring_buffer_per_cpu      **buffers;
227 };
228
229 struct ring_buffer_iter {
230         struct ring_buffer_per_cpu      *cpu_buffer;
231         unsigned long                   head;
232         struct buffer_page              *head_page;
233         u64                             read_stamp;
234 };
235
236 #define RB_WARN_ON(buffer, cond)                                \
237         do {                                                    \
238                 if (unlikely(cond)) {                           \
239                         atomic_inc(&buffer->record_disabled);   \
240                         WARN_ON(1);                             \
241                 }                                               \
242         } while (0)
243
244 #define RB_WARN_ON_RET(buffer, cond)                            \
245         do {                                                    \
246                 if (unlikely(cond)) {                           \
247                         atomic_inc(&buffer->record_disabled);   \
248                         WARN_ON(1);                             \
249                         return -1;                              \
250                 }                                               \
251         } while (0)
252
253 #define RB_WARN_ON_ONCE(buffer, cond)                           \
254         do {                                                    \
255                 static int once;                                \
256                 if (unlikely(cond) && !once) {                  \
257                         once++;                                 \
258                         atomic_inc(&buffer->record_disabled);   \
259                         WARN_ON(1);                             \
260                 }                                               \
261         } while (0)
262
263 /**
264  * rb_check_pages - integrity check of buffer pages
265  * @cpu_buffer: CPU buffer with pages to test
266  *
267  * As a safety measure we check to make sure the data pages have not
268  * been corrupted.
269  */
270 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
271 {
272         struct list_head *head = &cpu_buffer->pages;
273         struct buffer_page *page, *tmp;
274
275         RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
276         RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
277
278         list_for_each_entry_safe(page, tmp, head, list) {
279                 RB_WARN_ON_RET(cpu_buffer,
280                                page->list.next->prev != &page->list);
281                 RB_WARN_ON_RET(cpu_buffer,
282                                page->list.prev->next != &page->list);
283         }
284
285         return 0;
286 }
287
288 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
289                              unsigned nr_pages)
290 {
291         struct list_head *head = &cpu_buffer->pages;
292         struct buffer_page *page, *tmp;
293         unsigned long addr;
294         LIST_HEAD(pages);
295         unsigned i;
296
297         for (i = 0; i < nr_pages; i++) {
298                 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
299                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
300                 if (!page)
301                         goto free_pages;
302                 list_add(&page->list, &pages);
303
304                 addr = __get_free_page(GFP_KERNEL);
305                 if (!addr)
306                         goto free_pages;
307                 page->page = (void *)addr;
308         }
309
310         list_splice(&pages, head);
311
312         rb_check_pages(cpu_buffer);
313
314         return 0;
315
316  free_pages:
317         list_for_each_entry_safe(page, tmp, &pages, list) {
318                 list_del_init(&page->list);
319                 free_buffer_page(page);
320         }
321         return -ENOMEM;
322 }
323
324 static struct ring_buffer_per_cpu *
325 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
326 {
327         struct ring_buffer_per_cpu *cpu_buffer;
328         struct buffer_page *page;
329         unsigned long addr;
330         int ret;
331
332         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
333                                   GFP_KERNEL, cpu_to_node(cpu));
334         if (!cpu_buffer)
335                 return NULL;
336
337         cpu_buffer->cpu = cpu;
338         cpu_buffer->buffer = buffer;
339         spin_lock_init(&cpu_buffer->lock);
340         INIT_LIST_HEAD(&cpu_buffer->pages);
341
342         page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
343                             GFP_KERNEL, cpu_to_node(cpu));
344         if (!page)
345                 goto fail_free_buffer;
346
347         cpu_buffer->reader_page = page;
348         addr = __get_free_page(GFP_KERNEL);
349         if (!addr)
350                 goto fail_free_reader;
351         page->page = (void *)addr;
352
353         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
354
355         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
356         if (ret < 0)
357                 goto fail_free_reader;
358
359         cpu_buffer->head_page
360                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
361         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
362
363         return cpu_buffer;
364
365  fail_free_reader:
366         free_buffer_page(cpu_buffer->reader_page);
367
368  fail_free_buffer:
369         kfree(cpu_buffer);
370         return NULL;
371 }
372
373 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
374 {
375         struct list_head *head = &cpu_buffer->pages;
376         struct buffer_page *page, *tmp;
377
378         list_del_init(&cpu_buffer->reader_page->list);
379         free_buffer_page(cpu_buffer->reader_page);
380
381         list_for_each_entry_safe(page, tmp, head, list) {
382                 list_del_init(&page->list);
383                 free_buffer_page(page);
384         }
385         kfree(cpu_buffer);
386 }
387
388 /*
389  * Causes compile errors if the struct buffer_page gets bigger
390  * than the struct page.
391  */
392 extern int ring_buffer_page_too_big(void);
393
394 /**
395  * ring_buffer_alloc - allocate a new ring_buffer
396  * @size: the size in bytes per cpu that is needed.
397  * @flags: attributes to set for the ring buffer.
398  *
399  * Currently the only flag that is available is the RB_FL_OVERWRITE
400  * flag. This flag means that the buffer will overwrite old data
401  * when the buffer wraps. If this flag is not set, the buffer will
402  * drop data when the tail hits the head.
403  */
404 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
405 {
406         struct ring_buffer *buffer;
407         int bsize;
408         int cpu;
409
410         /* Paranoid! Optimizes out when all is well */
411         if (sizeof(struct buffer_page) > sizeof(struct page))
412                 ring_buffer_page_too_big();
413
414
415         /* keep it in its own cache line */
416         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
417                          GFP_KERNEL);
418         if (!buffer)
419                 return NULL;
420
421         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
422         buffer->flags = flags;
423
424         /* need at least two pages */
425         if (buffer->pages == 1)
426                 buffer->pages++;
427
428         buffer->cpumask = cpu_possible_map;
429         buffer->cpus = nr_cpu_ids;
430
431         bsize = sizeof(void *) * nr_cpu_ids;
432         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
433                                   GFP_KERNEL);
434         if (!buffer->buffers)
435                 goto fail_free_buffer;
436
437         for_each_buffer_cpu(buffer, cpu) {
438                 buffer->buffers[cpu] =
439                         rb_allocate_cpu_buffer(buffer, cpu);
440                 if (!buffer->buffers[cpu])
441                         goto fail_free_buffers;
442         }
443
444         mutex_init(&buffer->mutex);
445
446         return buffer;
447
448  fail_free_buffers:
449         for_each_buffer_cpu(buffer, cpu) {
450                 if (buffer->buffers[cpu])
451                         rb_free_cpu_buffer(buffer->buffers[cpu]);
452         }
453         kfree(buffer->buffers);
454
455  fail_free_buffer:
456         kfree(buffer);
457         return NULL;
458 }
459 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
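/*
 * Illustrative sketch (caller code, not part of this file): allocate a
 * buffer of roughly 64KB per CPU that overwrites old data when it wraps,
 * and free it again when done.  The size is rounded up to whole pages.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */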
460
461 /**
462  * ring_buffer_free - free a ring buffer.
463  * @buffer: the buffer to free.
464  */
465 void
466 ring_buffer_free(struct ring_buffer *buffer)
467 {
468         int cpu;
469
470         for_each_buffer_cpu(buffer, cpu)
471                 rb_free_cpu_buffer(buffer->buffers[cpu]);
472
473         kfree(buffer);
474 }
475 EXPORT_SYMBOL_GPL(ring_buffer_free);
476
477 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
478
479 static void
480 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
481 {
482         struct buffer_page *page;
483         struct list_head *p;
484         unsigned i;
485
486         atomic_inc(&cpu_buffer->record_disabled);
487         synchronize_sched();
488
489         for (i = 0; i < nr_pages; i++) {
490                 BUG_ON(list_empty(&cpu_buffer->pages));
491                 p = cpu_buffer->pages.next;
492                 page = list_entry(p, struct buffer_page, list);
493                 list_del_init(&page->list);
494                 free_buffer_page(page);
495         }
496         BUG_ON(list_empty(&cpu_buffer->pages));
497
498         rb_reset_cpu(cpu_buffer);
499
500         rb_check_pages(cpu_buffer);
501
502         atomic_dec(&cpu_buffer->record_disabled);
503
504 }
505
506 static void
507 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
508                 struct list_head *pages, unsigned nr_pages)
509 {
510         struct buffer_page *page;
511         struct list_head *p;
512         unsigned i;
513
514         atomic_inc(&cpu_buffer->record_disabled);
515         synchronize_sched();
516
517         for (i = 0; i < nr_pages; i++) {
518                 BUG_ON(list_empty(pages));
519                 p = pages->next;
520                 page = list_entry(p, struct buffer_page, list);
521                 list_del_init(&page->list);
522                 list_add_tail(&page->list, &cpu_buffer->pages);
523         }
524         rb_reset_cpu(cpu_buffer);
525
526         rb_check_pages(cpu_buffer);
527
528         atomic_dec(&cpu_buffer->record_disabled);
529 }
530
531 /**
532  * ring_buffer_resize - resize the ring buffer
533  * @buffer: the buffer to resize.
534  * @size: the new size.
535  *
536  * The tracer is responsible for making sure that the buffer is
537  * not being used while changing the size.
538  * Note: We may be able to change the above requirement by using
539  *  RCU synchronizations.
540  *
541  * Minimum size is 2 * BUF_PAGE_SIZE.
542  *
543  * Returns -ENOMEM on failure.
544  */
545 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
546 {
547         struct ring_buffer_per_cpu *cpu_buffer;
548         unsigned nr_pages, rm_pages, new_pages;
549         struct buffer_page *page, *tmp;
550         unsigned long buffer_size;
551         unsigned long addr;
552         LIST_HEAD(pages);
553         int i, cpu;
554
555         /*
556          * Always succeed at resizing a non-existent buffer:
557          */
558         if (!buffer)
559                 return size;
560
561         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
562         size *= BUF_PAGE_SIZE;
563         buffer_size = buffer->pages * BUF_PAGE_SIZE;
564
565         /* we need a minimum of two pages */
566         if (size < BUF_PAGE_SIZE * 2)
567                 size = BUF_PAGE_SIZE * 2;
568
569         if (size == buffer_size)
570                 return size;
571
572         mutex_lock(&buffer->mutex);
573
574         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
575
576         if (size < buffer_size) {
577
578                 /* easy case, just free pages */
579                 BUG_ON(nr_pages >= buffer->pages);
580
581                 rm_pages = buffer->pages - nr_pages;
582
583                 for_each_buffer_cpu(buffer, cpu) {
584                         cpu_buffer = buffer->buffers[cpu];
585                         rb_remove_pages(cpu_buffer, rm_pages);
586                 }
587                 goto out;
588         }
589
590         /*
591          * This is a bit more difficult. We only want to add pages
592          * when we can allocate enough for all CPUs. We do this
593          * by allocating all the pages and storing them on a local
594  * linked list. If we succeed in our allocation, then we
595          * add these pages to the cpu_buffers. Otherwise we just free
596          * them all and return -ENOMEM;
597          */
598         BUG_ON(nr_pages <= buffer->pages);
599         new_pages = nr_pages - buffer->pages;
600
601         for_each_buffer_cpu(buffer, cpu) {
602                 for (i = 0; i < new_pages; i++) {
603                         page = kzalloc_node(ALIGN(sizeof(*page),
604                                                   cache_line_size()),
605                                             GFP_KERNEL, cpu_to_node(cpu));
606                         if (!page)
607                                 goto free_pages;
608                         list_add(&page->list, &pages);
609                         addr = __get_free_page(GFP_KERNEL);
610                         if (!addr)
611                                 goto free_pages;
612                         page->page = (void *)addr;
613                 }
614         }
615
616         for_each_buffer_cpu(buffer, cpu) {
617                 cpu_buffer = buffer->buffers[cpu];
618                 rb_insert_pages(cpu_buffer, &pages, new_pages);
619         }
620
621         BUG_ON(!list_empty(&pages));
622
623  out:
624         buffer->pages = nr_pages;
625         mutex_unlock(&buffer->mutex);
626
627         return size;
628
629  free_pages:
630         list_for_each_entry_safe(page, tmp, &pages, list) {
631                 list_del_init(&page->list);
632                 free_buffer_page(page);
633         }
634         mutex_unlock(&buffer->mutex);
635         return -ENOMEM;
636 }
637 EXPORT_SYMBOL_GPL(ring_buffer_resize);
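/*
 * Illustrative sketch (caller code, not part of this file): grow the
 * buffer to 1MB per CPU while no tracer is reading or writing it.
 *
 *	int ret = ring_buffer_resize(buffer, 1024 * 1024);
 *	if (ret < 0)
 *		return ret;	// -ENOMEM: the new pages could not be allocated
 */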
638
639 static inline int rb_null_event(struct ring_buffer_event *event)
640 {
641         return event->type == RINGBUF_TYPE_PADDING;
642 }
643
644 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
645 {
646         return page->page + index;
647 }
648
649 static inline struct ring_buffer_event *
650 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
651 {
652         return __rb_page_index(cpu_buffer->reader_page,
653                                cpu_buffer->reader_page->read);
654 }
655
656 static inline struct ring_buffer_event *
657 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
658 {
659         return __rb_page_index(cpu_buffer->head_page,
660                                cpu_buffer->head_page->read);
661 }
662
663 static inline struct ring_buffer_event *
664 rb_iter_head_event(struct ring_buffer_iter *iter)
665 {
666         return __rb_page_index(iter->head_page, iter->head);
667 }
668
669 static inline unsigned rb_page_write(struct buffer_page *bpage)
670 {
671         return local_read(&bpage->write);
672 }
673
674 static inline unsigned rb_page_commit(struct buffer_page *bpage)
675 {
676         return local_read(&bpage->commit);
677 }
678
679 /* Size is determined by what has been committed */
680 static inline unsigned rb_page_size(struct buffer_page *bpage)
681 {
682         return rb_page_commit(bpage);
683 }
684
685 static inline unsigned
686 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
687 {
688         return rb_page_commit(cpu_buffer->commit_page);
689 }
690
691 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
692 {
693         return rb_page_commit(cpu_buffer->head_page);
694 }
695
696 /*
697  * When the tail hits the head and the buffer is in overwrite mode,
698  * the head jumps to the next page and all content on the previous
699  * page is discarded. But before doing so, we update the overrun
700  * variable of the buffer.
701  */
702 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
703 {
704         struct ring_buffer_event *event;
705         unsigned long head;
706
707         for (head = 0; head < rb_head_size(cpu_buffer);
708              head += rb_event_length(event)) {
709
710                 event = __rb_page_index(cpu_buffer->head_page, head);
711                 BUG_ON(rb_null_event(event));
712                 /* Only count data entries */
713                 if (event->type != RINGBUF_TYPE_DATA)
714                         continue;
715                 cpu_buffer->overrun++;
716                 cpu_buffer->entries--;
717         }
718 }
719
720 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
721                                struct buffer_page **page)
722 {
723         struct list_head *p = (*page)->list.next;
724
725         if (p == &cpu_buffer->pages)
726                 p = p->next;
727
728         *page = list_entry(p, struct buffer_page, list);
729 }
730
731 static inline unsigned
732 rb_event_index(struct ring_buffer_event *event)
733 {
734         unsigned long addr = (unsigned long)event;
735
736         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
737 }
738
739 static inline int
740 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
741              struct ring_buffer_event *event)
742 {
743         unsigned long addr = (unsigned long)event;
744         unsigned long index;
745
746         index = rb_event_index(event);
747         addr &= PAGE_MASK;
748
749         return cpu_buffer->commit_page->page == (void *)addr &&
750                 rb_commit_index(cpu_buffer) == index;
751 }
752
753 static inline void
754 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
755                     struct ring_buffer_event *event)
756 {
757         unsigned long addr = (unsigned long)event;
758         unsigned long index;
759
760         index = rb_event_index(event);
761         addr &= PAGE_MASK;
762
763         while (cpu_buffer->commit_page->page != (void *)addr) {
764                 RB_WARN_ON(cpu_buffer,
765                            cpu_buffer->commit_page == cpu_buffer->tail_page);
766                 cpu_buffer->commit_page->commit =
767                         cpu_buffer->commit_page->write;
768                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
769                 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
770         }
771
772         /* Now set the commit to the event's index */
773         local_set(&cpu_buffer->commit_page->commit, index);
774 }
775
776 static inline void
777 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
778 {
779         /*
780          * We only race with interrupts and NMIs on this CPU.
781          * If we own the commit event, then we can commit
782          * all others that interrupted us, since the interruptions
783          * are in stack format (they finish before they come
784          * back to us). This allows us to do a simple loop to
785          * assign the commit to the tail.
786          */
787         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
788                 cpu_buffer->commit_page->commit =
789                         cpu_buffer->commit_page->write;
790                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
791                 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
792                 /* add barrier to keep gcc from optimizing too much */
793                 barrier();
794         }
795         while (rb_commit_index(cpu_buffer) !=
796                rb_page_write(cpu_buffer->commit_page)) {
797                 cpu_buffer->commit_page->commit =
798                         cpu_buffer->commit_page->write;
799                 barrier();
800         }
801 }
802
803 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
804 {
805         cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
806         cpu_buffer->reader_page->read = 0;
807 }
808
809 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
810 {
811         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
812
813         /*
814          * The iterator could be on the reader page (it starts there).
815          * But the head could have moved, since the reader was
816          * found. Check for this case and assign the iterator
817          * to the head page instead of next.
818          */
819         if (iter->head_page == cpu_buffer->reader_page)
820                 iter->head_page = cpu_buffer->head_page;
821         else
822                 rb_inc_page(cpu_buffer, &iter->head_page);
823
824         iter->read_stamp = iter->head_page->time_stamp;
825         iter->head = 0;
826 }
827
828 /**
829  * ring_buffer_update_event - update event type and data
830  * @event: the event to update
831  * @type: the type of event
832  * @length: the size of the event field in the ring buffer
833  *
834  * Update the type and data fields of the event. The length
835  * is the actual size that is written to the ring buffer,
836  * and with this, we can determine what to place into the
837  * data field.
838  */
839 static inline void
840 rb_update_event(struct ring_buffer_event *event,
841                          unsigned type, unsigned length)
842 {
843         event->type = type;
844
845         switch (type) {
846
847         case RINGBUF_TYPE_PADDING:
848                 break;
849
850         case RINGBUF_TYPE_TIME_EXTEND:
851                 event->len =
852                         (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
853                         >> RB_ALIGNMENT_SHIFT;
854                 break;
855
856         case RINGBUF_TYPE_TIME_STAMP:
857                 event->len =
858                         (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
859                         >> RB_ALIGNMENT_SHIFT;
860                 break;
861
862         case RINGBUF_TYPE_DATA:
863                 length -= RB_EVNT_HDR_SIZE;
864                 if (length > RB_MAX_SMALL_DATA) {
865                         event->len = 0;
866                         event->array[0] = length;
867                 } else
868                         event->len =
869                                 (length + (RB_ALIGNMENT-1))
870                                 >> RB_ALIGNMENT_SHIFT;
871                 break;
872         default:
873                 BUG();
874         }
875 }
876
877 static inline unsigned rb_calculate_event_length(unsigned length)
878 {
879         struct ring_buffer_event event; /* Used only for sizeof array */
880
881         /* zero length can cause confusion */
882         if (!length)
883                 length = 1;
884
885         if (length > RB_MAX_SMALL_DATA)
886                 length += sizeof(event.array[0]);
887
888         length += RB_EVNT_HDR_SIZE;
889         length = ALIGN(length, RB_ALIGNMENT);
890
891         return length;
892 }
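/*
 * Worked example of the calculation above (RB_EVNT_HDR_SIZE is 4 bytes
 * on this event layout, RB_ALIGNMENT is 4, RB_MAX_SMALL_DATA is 28):
 * a 5 byte request fits the small encoding, so only the header is added,
 * giving ALIGN(5 + 4, 4) = 12 bytes reserved.  A 100 byte request is
 * large, so an extra array[0] word carries the length, giving
 * ALIGN(100 + 4 + 4, 4) = 108 bytes reserved.
 */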
893
894 static struct ring_buffer_event *
895 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
896                   unsigned type, unsigned long length, u64 *ts)
897 {
898         struct buffer_page *tail_page, *head_page, *reader_page;
899         unsigned long tail, write;
900         struct ring_buffer *buffer = cpu_buffer->buffer;
901         struct ring_buffer_event *event;
902         unsigned long flags;
903
904         tail_page = cpu_buffer->tail_page;
905         write = local_add_return(length, &tail_page->write);
906         tail = write - length;
907
908         /* See if we shot past the end of this buffer page */
909         if (write > BUF_PAGE_SIZE) {
910                 struct buffer_page *next_page = tail_page;
911
912                 spin_lock_irqsave(&cpu_buffer->lock, flags);
913
914                 rb_inc_page(cpu_buffer, &next_page);
915
916                 head_page = cpu_buffer->head_page;
917                 reader_page = cpu_buffer->reader_page;
918
919                 /* we grabbed the lock before incrementing */
920                 RB_WARN_ON(cpu_buffer, next_page == reader_page);
921
922                 /*
923                  * If for some reason, we had an interrupt storm that made
924                  * it all the way around the buffer, bail, and warn
925                  * about it.
926                  */
927                 if (unlikely(next_page == cpu_buffer->commit_page)) {
928                         WARN_ON_ONCE(1);
929                         goto out_unlock;
930                 }
931
932                 if (next_page == head_page) {
933                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
934                                 /* reset write */
935                                 if (tail <= BUF_PAGE_SIZE)
936                                         local_set(&tail_page->write, tail);
937                                 goto out_unlock;
938                         }
939
940                         /* tail_page has not moved yet? */
941                         if (tail_page == cpu_buffer->tail_page) {
942                                 /* count overflows */
943                                 rb_update_overflow(cpu_buffer);
944
945                                 rb_inc_page(cpu_buffer, &head_page);
946                                 cpu_buffer->head_page = head_page;
947                                 cpu_buffer->head_page->read = 0;
948                         }
949                 }
950
951                 /*
952                  * If the tail page is still the same as what we think
953                  * it is, then it is up to us to update the tail
954                  * pointer.
955                  */
956                 if (tail_page == cpu_buffer->tail_page) {
957                         local_set(&next_page->write, 0);
958                         local_set(&next_page->commit, 0);
959                         cpu_buffer->tail_page = next_page;
960
961                         /* reread the time stamp */
962                         *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
963                         cpu_buffer->tail_page->time_stamp = *ts;
964                 }
965
966                 /*
967                  * The actual tail page has moved forward.
968                  */
969                 if (tail < BUF_PAGE_SIZE) {
970                         /* Mark the rest of the page with padding */
971                         event = __rb_page_index(tail_page, tail);
972                         event->type = RINGBUF_TYPE_PADDING;
973                 }
974
975                 if (tail <= BUF_PAGE_SIZE)
976                         /* Set the write back to the previous setting */
977                         local_set(&tail_page->write, tail);
978
979                 /*
980                  * If this was a commit entry that failed,
981                  * increment that too
982                  */
983                 if (tail_page == cpu_buffer->commit_page &&
984                     tail == rb_commit_index(cpu_buffer)) {
985                         rb_set_commit_to_write(cpu_buffer);
986                 }
987
988                 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
989
990                 /* fail and let the caller try again */
991                 return ERR_PTR(-EAGAIN);
992         }
993
994         /* We reserved something on the buffer */
995
996         BUG_ON(write > BUF_PAGE_SIZE);
997
998         event = __rb_page_index(tail_page, tail);
999         rb_update_event(event, type, length);
1000
1001         /*
1002          * If this is a commit and the tail is zero, then update
1003          * this page's time stamp.
1004          */
1005         if (!tail && rb_is_commit(cpu_buffer, event))
1006                 cpu_buffer->commit_page->time_stamp = *ts;
1007
1008         return event;
1009
1010  out_unlock:
1011         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1012         return NULL;
1013 }
1014
1015 static int
1016 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1017                   u64 *ts, u64 *delta)
1018 {
1019         struct ring_buffer_event *event;
1020         static int once;
1021         int ret;
1022
1023         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1024                 printk(KERN_WARNING "Delta way too big! %llu"
1025                        " ts=%llu write stamp = %llu\n",
1026                        (unsigned long long)*delta,
1027                        (unsigned long long)*ts,
1028                        (unsigned long long)cpu_buffer->write_stamp);
1029                 WARN_ON(1);
1030         }
1031
1032         /*
1033          * The delta is too big, we need to add a
1034          * new timestamp.
1035          */
1036         event = __rb_reserve_next(cpu_buffer,
1037                                   RINGBUF_TYPE_TIME_EXTEND,
1038                                   RB_LEN_TIME_EXTEND,
1039                                   ts);
1040         if (!event)
1041                 return -EBUSY;
1042
1043         if (PTR_ERR(event) == -EAGAIN)
1044                 return -EAGAIN;
1045
1046         /* Only a committed time event can update the write stamp */
1047         if (rb_is_commit(cpu_buffer, event)) {
1048                 /*
1049                  * If this is the first on the page, then we need to
1050                  * update the page itself, and just put in a zero.
1051                  */
1052                 if (rb_event_index(event)) {
1053                         event->time_delta = *delta & TS_MASK;
1054                         event->array[0] = *delta >> TS_SHIFT;
1055                 } else {
1056                         cpu_buffer->commit_page->time_stamp = *ts;
1057                         event->time_delta = 0;
1058                         event->array[0] = 0;
1059                 }
1060                 cpu_buffer->write_stamp = *ts;
1061                 /* let the caller know this was the commit */
1062                 ret = 1;
1063         } else {
1064                 /* Darn, this is just wasted space */
1065                 event->time_delta = 0;
1066                 event->array[0] = 0;
1067                 ret = 0;
1068         }
1069
1070         *delta = 0;
1071
1072         return ret;
1073 }
1074
1075 static struct ring_buffer_event *
1076 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1077                       unsigned type, unsigned long length)
1078 {
1079         struct ring_buffer_event *event;
1080         u64 ts, delta;
1081         int commit = 0;
1082         int nr_loops = 0;
1083
1084  again:
1085         /*
1086          * We allow for interrupts to reenter here and do a trace.
1087          * If one does, it will cause this original code to loop
1088          * back here. Even with heavy interrupts happening, this
1089          * should only happen a few times in a row. If this happens
1090          * 1000 times in a row, there must be either an interrupt
1091          * storm or we have something buggy.
1092          * Bail!
1093          */
1094         if (unlikely(++nr_loops > 1000)) {
1095                 RB_WARN_ON(cpu_buffer, 1);
1096                 return NULL;
1097         }
1098
1099         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1100
1101         /*
1102          * Only the first commit can update the timestamp.
1103          * Yes there is a race here. If an interrupt comes in
1104          * just after the conditional and it traces too, then it
1105          * will also check the deltas. More than one timestamp may
1106          * also be made. But only the entry that did the actual
1107          * commit will be something other than zero.
1108          */
1109         if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1110             rb_page_write(cpu_buffer->tail_page) ==
1111             rb_commit_index(cpu_buffer)) {
1112
1113                 delta = ts - cpu_buffer->write_stamp;
1114
1115                 /* make sure this delta is calculated here */
1116                 barrier();
1117
1118                 /* Did the write stamp get updated already? */
1119                 if (unlikely(ts < cpu_buffer->write_stamp))
1120                         delta = 0;
1121
1122                 if (test_time_stamp(delta)) {
1123
1124                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1125
1126                         if (commit == -EBUSY)
1127                                 return NULL;
1128
1129                         if (commit == -EAGAIN)
1130                                 goto again;
1131
1132                         RB_WARN_ON(cpu_buffer, commit < 0);
1133                 }
1134         } else
1135                 /* Non commits have zero deltas */
1136                 delta = 0;
1137
1138         event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1139         if (PTR_ERR(event) == -EAGAIN)
1140                 goto again;
1141
1142         if (!event) {
1143                 if (unlikely(commit))
1144                         /*
1145                          * Ouch! We needed a timestamp and it was committed. But
1146                          * we didn't get our event reserved.
1147                          */
1148                         rb_set_commit_to_write(cpu_buffer);
1149                 return NULL;
1150         }
1151
1152         /*
1153          * If the timestamp was committed, make the commit our entry
1154          * now so that we will update it when needed.
1155          */
1156         if (commit)
1157                 rb_set_commit_event(cpu_buffer, event);
1158         else if (!rb_is_commit(cpu_buffer, event))
1159                 delta = 0;
1160
1161         event->time_delta = delta;
1162
1163         return event;
1164 }
1165
1166 static DEFINE_PER_CPU(int, rb_need_resched);
1167
1168 /**
1169  * ring_buffer_lock_reserve - reserve a part of the buffer
1170  * @buffer: the ring buffer to reserve from
1171  * @length: the length of the data to reserve (excluding event header)
1172  * @flags: a pointer to save the interrupt flags
1173  *
1174  * Returns a reserved event on the ring buffer to copy directly to.
1175  * The user of this interface will need to get the body to write into
1176  * and can use the ring_buffer_event_data() interface.
1177  *
1178  * The length is the length of the data needed, not the event length
1179  * which also includes the event header.
1180  *
1181  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1182  * If NULL is returned, then nothing has been allocated or locked.
1183  */
1184 struct ring_buffer_event *
1185 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1186                          unsigned long length,
1187                          unsigned long *flags)
1188 {
1189         struct ring_buffer_per_cpu *cpu_buffer;
1190         struct ring_buffer_event *event;
1191         int cpu, resched;
1192
1193         if (ring_buffers_off)
1194                 return NULL;
1195
1196         if (atomic_read(&buffer->record_disabled))
1197                 return NULL;
1198
1199         /* If we are tracing schedule, we don't want to recurse */
1200         resched = need_resched();
1201         preempt_disable_notrace();
1202
1203         cpu = raw_smp_processor_id();
1204
1205         if (!cpu_isset(cpu, buffer->cpumask))
1206                 goto out;
1207
1208         cpu_buffer = buffer->buffers[cpu];
1209
1210         if (atomic_read(&cpu_buffer->record_disabled))
1211                 goto out;
1212
1213         length = rb_calculate_event_length(length);
1214         if (length > BUF_PAGE_SIZE)
1215                 goto out;
1216
1217         event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1218         if (!event)
1219                 goto out;
1220
1221         /*
1222          * Need to store resched state on this cpu.
1223          * Only the first needs to.
1224          */
1225
1226         if (preempt_count() == 1)
1227                 per_cpu(rb_need_resched, cpu) = resched;
1228
1229         return event;
1230
1231  out:
1232         if (resched)
1233                 preempt_enable_no_resched_notrace();
1234         else
1235                 preempt_enable_notrace();
1236         return NULL;
1237 }
1238 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1239
1240 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1241                       struct ring_buffer_event *event)
1242 {
1243         cpu_buffer->entries++;
1244
1245         /* Only process further if we own the commit */
1246         if (!rb_is_commit(cpu_buffer, event))
1247                 return;
1248
1249         cpu_buffer->write_stamp += event->time_delta;
1250
1251         rb_set_commit_to_write(cpu_buffer);
1252 }
1253
1254 /**
1255  * ring_buffer_unlock_commit - commit a reserved event
1256  * @buffer: The buffer to commit to
1257  * @event: The event pointer to commit.
1258  * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1259  *
1260  * This commits the data to the ring buffer, and releases any locks held.
1261  *
1262  * Must be paired with ring_buffer_lock_reserve.
1263  */
1264 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1265                               struct ring_buffer_event *event,
1266                               unsigned long flags)
1267 {
1268         struct ring_buffer_per_cpu *cpu_buffer;
1269         int cpu = raw_smp_processor_id();
1270
1271         cpu_buffer = buffer->buffers[cpu];
1272
1273         rb_commit(cpu_buffer, event);
1274
1275         /*
1276          * Only the last preempt count needs to restore preemption.
1277          */
1278         if (preempt_count() == 1) {
1279                 if (per_cpu(rb_need_resched, cpu))
1280                         preempt_enable_no_resched_notrace();
1281                 else
1282                         preempt_enable_notrace();
1283         } else
1284                 preempt_enable_no_resched_notrace();
1285
1286         return 0;
1287 }
1288 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
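/*
 * Illustrative sketch of the reserve/commit pair (caller code, not part
 * of this file); "my_payload" is a made-up record for the example.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long flags;
 *	void *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(my_payload), &flags);
 *	if (!event)
 *		return -EBUSY;	// recording off, disabled, or no room
 *	body = ring_buffer_event_data(event);
 *	memcpy(body, &my_payload, sizeof(my_payload));
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */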
1289
1290 /**
1291  * ring_buffer_write - write data to the buffer without reserving
1292  * @buffer: The ring buffer to write to.
1293  * @length: The length of the data being written (excluding the event header)
1294  * @data: The data to write to the buffer.
1295  *
1296  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1297  * one function. If you already have the data to write to the buffer, it
1298  * may be easier to simply call this function.
1299  *
1300  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1301  * and not the length of the event which would hold the header.
1302  */
1303 int ring_buffer_write(struct ring_buffer *buffer,
1304                         unsigned long length,
1305                         void *data)
1306 {
1307         struct ring_buffer_per_cpu *cpu_buffer;
1308         struct ring_buffer_event *event;
1309         unsigned long event_length;
1310         void *body;
1311         int ret = -EBUSY;
1312         int cpu, resched;
1313
1314         if (ring_buffers_off)
1315                 return -EBUSY;
1316
1317         if (atomic_read(&buffer->record_disabled))
1318                 return -EBUSY;
1319
1320         resched = need_resched();
1321         preempt_disable_notrace();
1322
1323         cpu = raw_smp_processor_id();
1324
1325         if (!cpu_isset(cpu, buffer->cpumask))
1326                 goto out;
1327
1328         cpu_buffer = buffer->buffers[cpu];
1329
1330         if (atomic_read(&cpu_buffer->record_disabled))
1331                 goto out;
1332
1333         event_length = rb_calculate_event_length(length);
1334         event = rb_reserve_next_event(cpu_buffer,
1335                                       RINGBUF_TYPE_DATA, event_length);
1336         if (!event)
1337                 goto out;
1338
1339         body = rb_event_data(event);
1340
1341         memcpy(body, data, length);
1342
1343         rb_commit(cpu_buffer, event);
1344
1345         ret = 0;
1346  out:
1347         if (resched)
1348                 preempt_enable_no_resched_notrace();
1349         else
1350                 preempt_enable_notrace();
1351
1352         return ret;
1353 }
1354 EXPORT_SYMBOL_GPL(ring_buffer_write);
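/*
 * Illustrative sketch (caller code, not part of this file): writing an
 * already-built record in one call instead of reserve + commit.
 * "struct my_entry" is a made-up record type for the example.
 *
 *	struct my_entry entry = { ... };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return -EBUSY;	// recording is off or the buffer is disabled
 */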
1355
1356 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1357 {
1358         struct buffer_page *reader = cpu_buffer->reader_page;
1359         struct buffer_page *head = cpu_buffer->head_page;
1360         struct buffer_page *commit = cpu_buffer->commit_page;
1361
1362         return reader->read == rb_page_commit(reader) &&
1363                 (commit == reader ||
1364                  (commit == head &&
1365                   head->read == rb_page_commit(commit)));
1366 }
1367
1368 /**
1369  * ring_buffer_record_disable - stop all writes into the buffer
1370  * @buffer: The ring buffer to stop writes to.
1371  *
1372  * This prevents all writes to the buffer. Any attempt to write
1373  * to the buffer after this will fail and return NULL.
1374  *
1375  * The caller should call synchronize_sched() after this.
1376  */
1377 void ring_buffer_record_disable(struct ring_buffer *buffer)
1378 {
1379         atomic_inc(&buffer->record_disabled);
1380 }
1381 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1382
1383 /**
1384  * ring_buffer_record_enable - enable writes to the buffer
1385  * @buffer: The ring buffer to enable writes
1386  *
1387  * Note, multiple disables will need the same number of enables
1388  * to truly enable the writing (much like preempt_disable).
1389  */
1390 void ring_buffer_record_enable(struct ring_buffer *buffer)
1391 {
1392         atomic_dec(&buffer->record_disabled);
1393 }
1394 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
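/*
 * Illustrative sketch (caller code, not part of this file): quiescing
 * the buffer before reading or resetting it, as the comments above
 * suggest.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();	// wait out writers already in progress
 *	...read or reset the buffer...
 *	ring_buffer_record_enable(buffer);
 */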
1395
1396 /**
1397  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1398  * @buffer: The ring buffer to stop writes to.
1399  * @cpu: The CPU buffer to stop
1400  *
1401  * This prevents all writes to the buffer. Any attempt to write
1402  * to the buffer after this will fail and return NULL.
1403  *
1404  * The caller should call synchronize_sched() after this.
1405  */
1406 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1407 {
1408         struct ring_buffer_per_cpu *cpu_buffer;
1409
1410         if (!cpu_isset(cpu, buffer->cpumask))
1411                 return;
1412
1413         cpu_buffer = buffer->buffers[cpu];
1414         atomic_inc(&cpu_buffer->record_disabled);
1415 }
1416 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1417
1418 /**
1419  * ring_buffer_record_enable_cpu - enable writes to the buffer
1420  * @buffer: The ring buffer to enable writes
1421  * @cpu: The CPU to enable.
1422  *
1423  * Note, multiple disables will need the same number of enables
1424  * to truly enable the writing (much like preempt_disable).
1425  */
1426 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1427 {
1428         struct ring_buffer_per_cpu *cpu_buffer;
1429
1430         if (!cpu_isset(cpu, buffer->cpumask))
1431                 return;
1432
1433         cpu_buffer = buffer->buffers[cpu];
1434         atomic_dec(&cpu_buffer->record_disabled);
1435 }
1436 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1437
1438 /**
1439  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1440  * @buffer: The ring buffer
1441  * @cpu: The per CPU buffer to get the entries from.
1442  */
1443 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1444 {
1445         struct ring_buffer_per_cpu *cpu_buffer;
1446
1447         if (!cpu_isset(cpu, buffer->cpumask))
1448                 return 0;
1449
1450         cpu_buffer = buffer->buffers[cpu];
1451         return cpu_buffer->entries;
1452 }
1453 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1454
1455 /**
1456  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1457  * @buffer: The ring buffer
1458  * @cpu: The per CPU buffer to get the number of overruns from
1459  */
1460 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1461 {
1462         struct ring_buffer_per_cpu *cpu_buffer;
1463
1464         if (!cpu_isset(cpu, buffer->cpumask))
1465                 return 0;
1466
1467         cpu_buffer = buffer->buffers[cpu];
1468         return cpu_buffer->overrun;
1469 }
1470 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1471
1472 /**
1473  * ring_buffer_entries - get the number of entries in a buffer
1474  * @buffer: The ring buffer
1475  *
1476  * Returns the total number of entries in the ring buffer
1477  * (all CPU entries)
1478  */
1479 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1480 {
1481         struct ring_buffer_per_cpu *cpu_buffer;
1482         unsigned long entries = 0;
1483         int cpu;
1484
1485         /* if you care about this being correct, lock the buffer */
1486         for_each_buffer_cpu(buffer, cpu) {
1487                 cpu_buffer = buffer->buffers[cpu];
1488                 entries += cpu_buffer->entries;
1489         }
1490
1491         return entries;
1492 }
1493 EXPORT_SYMBOL_GPL(ring_buffer_entries);
1494
1495 /**
1496  * ring_buffer_overruns - get the number of overruns in the buffer
1497  * @buffer: The ring buffer
1498  *
1499  * Returns the total number of overruns in the ring buffer
1500  * (all CPU entries)
1501  */
1502 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1503 {
1504         struct ring_buffer_per_cpu *cpu_buffer;
1505         unsigned long overruns = 0;
1506         int cpu;
1507
1508         /* if you care about this being correct, lock the buffer */
1509         for_each_buffer_cpu(buffer, cpu) {
1510                 cpu_buffer = buffer->buffers[cpu];
1511                 overruns += cpu_buffer->overrun;
1512         }
1513
1514         return overruns;
1515 }
1516 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1517
1518 /**
1519  * ring_buffer_iter_reset - reset an iterator
1520  * @iter: The iterator to reset
1521  *
1522  * Resets the iterator, so that it will start from the beginning
1523  * again.
1524  */
1525 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1526 {
1527         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1528
1529         /* Iterator usage is expected to have record disabled */
1530         if (list_empty(&cpu_buffer->reader_page->list)) {
1531                 iter->head_page = cpu_buffer->head_page;
1532                 iter->head = cpu_buffer->head_page->read;
1533         } else {
1534                 iter->head_page = cpu_buffer->reader_page;
1535                 iter->head = cpu_buffer->reader_page->read;
1536         }
1537         if (iter->head)
1538                 iter->read_stamp = cpu_buffer->read_stamp;
1539         else
1540                 iter->read_stamp = iter->head_page->time_stamp;
1541 }
1542 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1543
1544 /**
1545  * ring_buffer_iter_empty - check if an iterator has no more to read
1546  * @iter: The iterator to check
1547  */
1548 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1549 {
1550         struct ring_buffer_per_cpu *cpu_buffer;
1551
1552         cpu_buffer = iter->cpu_buffer;
1553
1554         return iter->head_page == cpu_buffer->commit_page &&
1555                 iter->head == rb_commit_index(cpu_buffer);
1556 }
1557 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1558
1559 static void
1560 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1561                      struct ring_buffer_event *event)
1562 {
1563         u64 delta;
1564
1565         switch (event->type) {
1566         case RINGBUF_TYPE_PADDING:
1567                 return;
1568
1569         case RINGBUF_TYPE_TIME_EXTEND:
1570                 delta = event->array[0];
1571                 delta <<= TS_SHIFT;
1572                 delta += event->time_delta;
1573                 cpu_buffer->read_stamp += delta;
1574                 return;
1575
1576         case RINGBUF_TYPE_TIME_STAMP:
1577                 /* FIXME: not implemented */
1578                 return;
1579
1580         case RINGBUF_TYPE_DATA:
1581                 cpu_buffer->read_stamp += event->time_delta;
1582                 return;
1583
1584         default:
1585                 BUG();
1586         }
1587         return;
1588 }

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
                          struct ring_buffer_event *event)
{
        u64 delta;

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                return;

        case RINGBUF_TYPE_TIME_EXTEND:
                delta = event->array[0];
                delta <<= TS_SHIFT;
                delta += event->time_delta;
                iter->read_stamp += delta;
                return;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                return;

        case RINGBUF_TYPE_DATA:
                iter->read_stamp += event->time_delta;
                return;

        default:
                BUG();
        }
        return;
}

static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *reader = NULL;
        unsigned long flags;
        int nr_loops = 0;

        spin_lock_irqsave(&cpu_buffer->lock, flags);

 again:
        /*
         * This should normally only loop twice. But because the
         * start of the reader inserts an empty page, it causes
         * a case where we will loop three times. There should be no
         * reason to loop four times (that I know of).
         */
        if (unlikely(++nr_loops > 3)) {
                RB_WARN_ON(cpu_buffer, 1);
                reader = NULL;
                goto out;
        }

        reader = cpu_buffer->reader_page;

        /* If there's more to read, return this page */
        if (cpu_buffer->reader_page->read < rb_page_size(reader))
                goto out;

        /* Never should we have an index greater than the size */
        RB_WARN_ON(cpu_buffer,
                   cpu_buffer->reader_page->read > rb_page_size(reader));

        /* check if we caught up to the tail */
        reader = NULL;
        if (cpu_buffer->commit_page == cpu_buffer->reader_page)
                goto out;

        /*
         * Splice the empty reader page into the list around the head.
         * Reset the reader page to size zero.
         */

        reader = cpu_buffer->head_page;
        cpu_buffer->reader_page->list.next = reader->list.next;
        cpu_buffer->reader_page->list.prev = reader->list.prev;

        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->commit, 0);

        /* Make the reader page now replace the head */
        reader->list.prev->next = &cpu_buffer->reader_page->list;
        reader->list.next->prev = &cpu_buffer->reader_page->list;

        /*
         * If the tail is on the reader, then we must set the head
         * to the inserted page, otherwise we set it one before.
         */
        cpu_buffer->head_page = cpu_buffer->reader_page;

        if (cpu_buffer->commit_page != reader)
                rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
        rb_reset_reader_page(cpu_buffer);

        goto again;

 out:
        spin_unlock_irqrestore(&cpu_buffer->lock, flags);

        return reader;
}
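
/*
 * A sketch of the splice done by rb_get_reader_page() above, in list
 * terms.  Before, the empty reader page R sits outside the ring and H
 * is the head page:
 *
 *      ... <-> prev <-> H <-> next <-> ...        R (detached)
 *
 * After R's prev/next are pointed at H's neighbours and the neighbours
 * point back at R, H has been pulled out for the reader and R has
 * taken its place in the ring:
 *
 *      ... <-> prev <-> R <-> next <-> ...        H (new reader_page)
 */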

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        unsigned length;

        reader = rb_get_reader_page(cpu_buffer);

        /* This function should not be called when buffer is empty */
        BUG_ON(!reader);

        event = rb_reader_event(cpu_buffer);

        if (event->type == RINGBUF_TYPE_DATA)
                cpu_buffer->entries--;

        rb_update_read_stamp(cpu_buffer, event);

        length = rb_event_length(event);
        cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;

        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

        /*
         * Check if we are at the end of the buffer.
         */
        if (iter->head >= rb_page_size(iter->head_page)) {
                BUG_ON(iter->head_page == cpu_buffer->commit_page);
                rb_inc_iter(iter);
                return;
        }

        event = rb_iter_head_event(iter);

        length = rb_event_length(event);

        /*
         * This should not be called to advance the head if we are
         * at the tail of the buffer.
         */
        BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
               (iter->head + length > rb_commit_index(cpu_buffer)));

        rb_update_iter_read_stamp(iter, event);

        iter->head += length;

        /* check for end of page padding */
        if ((iter->head >= rb_page_size(iter->head_page)) &&
            (iter->head_page != cpu_buffer->commit_page))
                rb_advance_iter(iter);
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        int nr_loops = 0;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

 again:
        /*
         * We repeat when a timestamp is encountered. It is possible
         * to get multiple timestamps from an interrupt entering just
         * as one timestamp is about to be written. The max times
         * that this can happen is the number of nested interrupts we
         * can have.  Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
        if (unlikely(++nr_loops > 10)) {
                RB_WARN_ON(cpu_buffer, 1);
                return NULL;
        }

        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
                return NULL;

        event = rb_reader_event(cpu_buffer);

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                RB_WARN_ON(cpu_buffer, 1);
                rb_advance_reader(cpu_buffer);
                return NULL;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_reader(cpu_buffer);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_reader(cpu_buffer);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = cpu_buffer->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
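
/*
 * Example usage (a minimal sketch; the buffer and cpu are assumed to
 * come from the caller): peek at the next event without consuming it.
 * Repeated calls return the same event until something consumes it.
 */
static void __maybe_unused example_peek(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        event = ring_buffer_peek(buffer, cpu, &ts);
        if (event)
                pr_info("next event: len=%u ts=%llu\n",
                        ring_buffer_event_length(event),
                        (unsigned long long)ts);
}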

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        int nr_loops = 0;

        if (ring_buffer_iter_empty(iter))
                return NULL;

        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

 again:
        /*
         * We repeat when a timestamp is encountered. It is possible
         * to get multiple timestamps from an interrupt entering just
         * as one timestamp is about to be written. The max times
         * that this can happen is the number of nested interrupts we
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
        if (unlikely(++nr_loops > 10)) {
                RB_WARN_ON(cpu_buffer, 1);
                return NULL;
        }

        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;

        event = rb_iter_head_event(iter);

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                rb_inc_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = iter->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The cpu to read the buffer from
 * @ts: Where to store the event's timestamp, if non-NULL
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        event = ring_buffer_peek(buffer, cpu, ts);
        if (!event)
                return NULL;

        cpu_buffer = buffer->buffers[cpu];
        rb_advance_reader(cpu_buffer);

        return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
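
/*
 * Example usage (a minimal sketch; the buffer and cpu are assumed to
 * come from the caller): drain one CPU's buffer.  Each call returns
 * the next unread event and consumes it, so the loop ends once the
 * reader has caught up with the writer.
 */
static void __maybe_unused example_drain(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
                pr_info("consumed: len=%u ts=%llu\n",
                        ring_buffer_event_length(event),
                        (unsigned long long)ts);
}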

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_iter *iter;
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

        iter->cpu_buffer = cpu_buffer;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        spin_lock_irqsave(&cpu_buffer->lock, flags);
        ring_buffer_iter_reset(iter);
        spin_unlock_irqrestore(&cpu_buffer->lock, flags);

        return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

        atomic_dec(&cpu_buffer->record_disabled);
        kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_event *event;

        event = ring_buffer_iter_peek(iter, ts);
        if (!event)
                return NULL;

        rb_advance_iter(iter);

        return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
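
/*
 * Example usage (a minimal sketch; the buffer and cpu are assumed to
 * come from the caller): a full non-consuming walk of one CPU's
 * buffer, pairing ring_buffer_read_start() with ring_buffer_read()
 * and ring_buffer_read_finish() as the comments above require.
 * Writers are held off for the duration because read_start()
 * disables recording on that CPU buffer.
 */
static void __maybe_unused example_iterate(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        struct ring_buffer_iter *iter;
        u64 ts;

        iter = ring_buffer_read_start(buffer, cpu);
        if (!iter)
                return;

        while ((event = ring_buffer_read(iter, &ts)) != NULL)
                pr_info("event: len=%u ts=%llu\n",
                        ring_buffer_event_length(event),
                        (unsigned long long)ts);

        ring_buffer_read_finish(iter);
}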

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
        return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
        local_set(&cpu_buffer->head_page->write, 0);
        local_set(&cpu_buffer->head_page->commit, 0);

        cpu_buffer->head_page->read = 0;

        cpu_buffer->tail_page = cpu_buffer->head_page;
        cpu_buffer->commit_page = cpu_buffer->head_page;

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->commit, 0);
        cpu_buffer->reader_page->read = 0;

        cpu_buffer->overrun = 0;
        cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return;

        spin_lock_irqsave(&cpu_buffer->lock, flags);

        rb_reset_cpu(cpu_buffer);

        spin_unlock_irqrestore(&cpu_buffer->lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
        int cpu;

        for_each_buffer_cpu(buffer, cpu)
                ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;

        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                if (!rb_per_cpu_empty(cpu_buffer))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;

        if (!cpu_isset(cpu, buffer->cpumask))
                return 1;

        cpu_buffer = buffer->buffers[cpu];
        return rb_per_cpu_empty(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                         struct ring_buffer *buffer_b, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer_a;
        struct ring_buffer_per_cpu *cpu_buffer_b;

        if (!cpu_isset(cpu, buffer_a->cpumask) ||
            !cpu_isset(cpu, buffer_b->cpumask))
                return -EINVAL;

        /* At least make sure the two buffers are somewhat the same */
        if (buffer_a->size != buffer_b->size ||
            buffer_a->pages != buffer_b->pages)
                return -EINVAL;

        cpu_buffer_a = buffer_a->buffers[cpu];
        cpu_buffer_b = buffer_b->buffers[cpu];

        /*
         * We can't do a synchronize_sched here because this
         * function can be called in atomic context.
         * Normally this will be called from the same CPU as cpu.
         * If not it's up to the caller to protect this.
         */
        atomic_inc(&cpu_buffer_a->record_disabled);
        atomic_inc(&cpu_buffer_b->record_disabled);

        buffer_a->buffers[cpu] = cpu_buffer_b;
        buffer_b->buffers[cpu] = cpu_buffer_a;

        cpu_buffer_b->buffer = buffer_a;
        cpu_buffer_a->buffer = buffer_b;

        atomic_dec(&cpu_buffer_a->record_disabled);
        atomic_dec(&cpu_buffer_b->record_disabled);

        return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
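
/*
 * Example usage (a minimal sketch of the "snapshot" pattern described
 * above; "live" and "spare" are assumed to be two buffers of the same
 * size created by the caller): after the swap, the captured events can
 * be read from "spare" at leisure while tracing continues into the
 * page set it donated.
 */
static int __maybe_unused example_snapshot(struct ring_buffer *live,
                                           struct ring_buffer *spare, int cpu)
{
        int ret;

        ret = ring_buffer_swap_cpu(live, spare, cpu);
        if (ret)
                return ret;     /* sizes or cpumasks did not match */

        /* "spare" now holds the snapshot for this cpu */
        return 0;
}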

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
{
        int *p = filp->private_data;
        char buf[64];
        int r;

        /* !ring_buffers_off == tracing_on */
        r = sprintf(buf, "%d\n", !*p);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        int *p = filp->private_data;
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        /* !ring_buffers_off == tracing_on */
        *p = !val;

        (*ppos)++;

        return cnt;
}

static struct file_operations rb_simple_fops = {
        .open           = tracing_open_generic,
        .read           = rb_simple_read,
        .write          = rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("tracing_on", 0644, d_tracer,
                                    &ring_buffers_off, &rb_simple_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_on' entry\n");

        return 0;
}

fs_initcall(rb_init_debugfs);
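
/*
 * With debugfs mounted in the usual place, the file created above can
 * be driven from the shell (a sketch; the exact path depends on where
 * tracing_init_dentry() put the tracing directory):
 *
 *      echo 0 > /sys/kernel/debug/tracing/tracing_on    # stop recording
 *      echo 1 > /sys/kernel/debug/tracing/tracing_on    # resume
 *      cat  /sys/kernel/debug/tracing/tracing_on        # query state
 */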