1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Generic ring buffer
4 *
5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 */
7 #include <linux/trace_recursion.h>
8 #include <linux/trace_events.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/trace_clock.h>
11 #include <linux/sched/clock.h>
12 #include <linux/cacheflush.h>
13 #include <linux/trace_seq.h>
14 #include <linux/spinlock.h>
15 #include <linux/irq_work.h>
16 #include <linux/security.h>
17 #include <linux/uaccess.h>
18 #include <linux/hardirq.h>
19 #include <linux/kthread.h> /* for self test */
20 #include <linux/module.h>
21 #include <linux/percpu.h>
22 #include <linux/mutex.h>
23 #include <linux/delay.h>
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/hash.h>
27 #include <linux/list.h>
28 #include <linux/cpu.h>
29 #include <linux/oom.h>
30 #include <linux/mm.h>
31
32 #include <asm/local64.h>
33 #include <asm/local.h>
34 #include <asm/setup.h>
35
36 #include "trace.h"
37
38 /*
39 * The "absolute" timestamp in the buffer is only 59 bits.
40 * If a clock value has any of the 5 MSBs set, they need to be
41 * saved and reinserted.
42 */
43 #define TS_MSB (0xf8ULL << 56)
44 #define ABS_TS_MASK (~TS_MSB)
45
46 static void update_pages_handler(struct work_struct *work);
47
48 #define RING_BUFFER_META_MAGIC 0xBADFEED
49
50 struct ring_buffer_meta {
51 int magic;
52 int struct_sizes;
53 unsigned long total_size;
54 unsigned long buffers_offset;
55 };
56
57 struct ring_buffer_cpu_meta {
58 unsigned long first_buffer;
59 unsigned long head_buffer;
60 unsigned long commit_buffer;
61 __u32 subbuf_size;
62 __u32 nr_subbufs;
63 int buffers[];
64 };
65
66 /*
67 * The ring buffer header is special. We must keep it up to date manually.
68 */
69 int ring_buffer_print_entry_header(struct trace_seq *s)
70 {
71 trace_seq_puts(s, "# compressed entry header\n");
72 trace_seq_puts(s, "\ttype_len : 5 bits\n");
73 trace_seq_puts(s, "\ttime_delta : 27 bits\n");
74 trace_seq_puts(s, "\tarray : 32 bits\n");
75 trace_seq_putc(s, '\n');
76 trace_seq_printf(s, "\tpadding : type == %d\n",
77 RINGBUF_TYPE_PADDING);
78 trace_seq_printf(s, "\ttime_extend : type == %d\n",
79 RINGBUF_TYPE_TIME_EXTEND);
80 trace_seq_printf(s, "\ttime_stamp : type == %d\n",
81 RINGBUF_TYPE_TIME_STAMP);
82 trace_seq_printf(s, "\tdata max type_len == %d\n",
83 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
84
85 return !trace_seq_has_overflowed(s);
86 }
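/*
 * An illustrative decoding of the compressed header above (example
 * values, not used by the code itself): a data event with a 12-byte
 * payload is stored with type_len = 3 (3 * RB_ALIGNMENT = 12 bytes),
 * the 27-bit time_delta holding the nanoseconds since the previous
 * event, and the payload starting at array[0]. Payloads larger than
 * RB_MAX_SMALL_DATA instead set type_len = 0, store the length in
 * array[0] and start the data at array[1].
 */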
87
88 /*
89 * The ring buffer is made up of a list of pages. A separate list of pages is
90 * allocated for each CPU. A writer may only write to a buffer that is
91 * associated with the CPU it is currently executing on. A reader may read
92 * from any per cpu buffer.
93 *
94 * The reader is special. For each per cpu buffer, the reader has its own
95 * reader page. When a reader has read the entire reader page, this reader
96 * page is swapped with another page in the ring buffer.
97 *
98 * Now, as long as the writer is off the reader page, the reader can do what
99 * ever it wants with that page. The writer will never write to that page
100 * again (as long as it is out of the ring buffer).
101 *
102 * Here's some silly ASCII art.
103 *
104 * +------+
105 * |reader| RING BUFFER
106 * |page |
107 * +------+ +---+ +---+ +---+
108 * | |-->| |-->| |
109 * +---+ +---+ +---+
110 * ^ |
111 * | |
112 * +---------------+
113 *
114 *
115 * +------+
116 * |reader| RING BUFFER
117 * |page |------------------v
118 * +------+ +---+ +---+ +---+
119 * | |-->| |-->| |
120 * +---+ +---+ +---+
121 * ^ |
122 * | |
123 * +---------------+
124 *
125 *
126 * +------+
127 * |reader| RING BUFFER
128 * |page |------------------v
129 * +------+ +---+ +---+ +---+
130 * ^ | |-->| |-->| |
131 * | +---+ +---+ +---+
132 * | |
133 * | |
134 * +------------------------------+
135 *
136 *
137 * +------+
138 * |buffer| RING BUFFER
139 * |page |------------------v
140 * +------+ +---+ +---+ +---+
141 * ^ | | | |-->| |
142 * | New +---+ +---+ +---+
143 * | Reader------^ |
144 * | page |
145 * +------------------------------+
146 *
147 *
148 * After we make this swap, the reader can hand this page off to the splice
149 * code and be done with it. It can even allocate a new page if it needs to
150 * and swap that into the ring buffer.
151 *
152 * We will be using cmpxchg soon to make all this lockless.
153 *
154 */
155
156 /* Used for individual buffers (after the counter) */
157 #define RB_BUFFER_OFF (1 << 20)
158
159 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
160
161 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
162 #define RB_ALIGNMENT 4U
163 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
164 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
165
166 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
167 # define RB_FORCE_8BYTE_ALIGNMENT 0
168 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT
169 #else
170 # define RB_FORCE_8BYTE_ALIGNMENT 1
171 # define RB_ARCH_ALIGNMENT 8U
172 #endif
173
174 #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
175
176 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
177 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
178
179 enum {
180 RB_LEN_TIME_EXTEND = 8,
181 RB_LEN_TIME_STAMP = 8,
182 };
183
184 #define skip_time_extend(event) \
185 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
186
187 #define extended_time(event) \
188 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
189
190 static inline bool rb_null_event(struct ring_buffer_event *event)
191 {
192 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
193 }
194
195 static void rb_event_set_padding(struct ring_buffer_event *event)
196 {
197 /* padding has a NULL time_delta */
198 event->type_len = RINGBUF_TYPE_PADDING;
199 event->time_delta = 0;
200 }
201
202 static unsigned
203 rb_event_data_length(struct ring_buffer_event *event)
204 {
205 unsigned length;
206
207 if (event->type_len)
208 length = event->type_len * RB_ALIGNMENT;
209 else
210 length = event->array[0];
211 return length + RB_EVNT_HDR_SIZE;
212 }
213
214 /*
215 * Return the length of the given event. Will return
216 * the length of the time extend if the event is a
217 * time extend.
218 */
219 static inline unsigned
220 rb_event_length(struct ring_buffer_event *event)
221 {
222 switch (event->type_len) {
223 case RINGBUF_TYPE_PADDING:
224 if (rb_null_event(event))
225 /* undefined */
226 return -1;
227 return event->array[0] + RB_EVNT_HDR_SIZE;
228
229 case RINGBUF_TYPE_TIME_EXTEND:
230 return RB_LEN_TIME_EXTEND;
231
232 case RINGBUF_TYPE_TIME_STAMP:
233 return RB_LEN_TIME_STAMP;
234
235 case RINGBUF_TYPE_DATA:
236 return rb_event_data_length(event);
237 default:
238 WARN_ON_ONCE(1);
239 }
240 /* not hit */
241 return 0;
242 }
243
244 /*
245 * Return total length of time extend and data,
246 * or just the event length for all other events.
247 */
248 static inline unsigned
249 rb_event_ts_length(struct ring_buffer_event *event)
250 {
251 unsigned len = 0;
252
253 if (extended_time(event)) {
254 /* time extends include the data event after it */
255 len = RB_LEN_TIME_EXTEND;
256 event = skip_time_extend(event);
257 }
258 return len + rb_event_length(event);
259 }
260
261 /**
262 * ring_buffer_event_length - return the length of the event
263 * @event: the event to get the length of
264 *
265 * Returns the size of the data load of a data event.
266 * If the event is something other than a data event, it
267 * returns the size of the event itself. With the exception
268 * of a TIME EXTEND, where it still returns the size of the
269 * data load of the data event after it.
270 */
271 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
272 {
273 unsigned length;
274
275 if (extended_time(event))
276 event = skip_time_extend(event);
277
278 length = rb_event_length(event);
279 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
280 return length;
281 length -= RB_EVNT_HDR_SIZE;
282 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283 length -= sizeof(event->array[0]);
284 return length;
285 }
286 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
287
288 /* inline for ring buffer fast paths */
289 static __always_inline void *
290 rb_event_data(struct ring_buffer_event *event)
291 {
292 if (extended_time(event))
293 event = skip_time_extend(event);
294 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
295 /* If length is in len field, then array[0] has the data */
296 if (event->type_len)
297 return (void *)&event->array[0];
298 /* Otherwise length is in array[0] and array[1] has the data */
299 return (void *)&event->array[1];
300 }
301
302 /**
303 * ring_buffer_event_data - return the data of the event
304 * @event: the event to get the data from
305 */
306 void *ring_buffer_event_data(struct ring_buffer_event *event)
307 {
308 return rb_event_data(event);
309 }
310 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
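/*
 * A minimal reader-side sketch of how the two helpers above are meant
 * to be used (illustrative only; declarations and error handling are
 * trimmed, and struct my_entry is a hypothetical event layout):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		len = ring_buffer_event_length(event);
 *	}
 *
 * ring_buffer_event_data() hides whether the payload lives at array[0]
 * or array[1], and both helpers skip a leading time extend if one is
 * present.
 */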
311
312 #define for_each_buffer_cpu(buffer, cpu) \
313 for_each_cpu(cpu, buffer->cpumask)
314
315 #define for_each_online_buffer_cpu(buffer, cpu) \
316 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
317
318 #define TS_SHIFT 27
319 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
320 #define TS_DELTA_TEST (~TS_MASK)
321
322 static u64 rb_event_time_stamp(struct ring_buffer_event *event)
323 {
324 u64 ts;
325
326 ts = event->array[0];
327 ts <<= TS_SHIFT;
328 ts += event->time_delta;
329
330 return ts;
331 }
332
333 /* Flag when events were overwritten */
334 #define RB_MISSED_EVENTS (1 << 31)
335 /* Missed count stored at end */
336 #define RB_MISSED_STORED (1 << 30)
337
338 #define RB_MISSED_MASK (3 << 30)
339
340 struct buffer_data_page {
341 u64 time_stamp; /* page time stamp */
342 local_t commit; /* write committed index */
343 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
344 };
345
346 struct buffer_data_read_page {
347 unsigned order; /* order of the page */
348 struct buffer_data_page *data; /* actual data, stored in this page */
349 };
350
351 /*
352 * Note, the buffer_page list must be first. The buffer pages
353 * are allocated in cache lines, which means that each buffer
354 * page will be at the beginning of a cache line, and thus
355 * the least significant bits will be zero. We use this to
356 * add flags in the list struct pointers, to make the ring buffer
357 * lockless.
358 */
359 struct buffer_page {
360 struct list_head list; /* list of buffer pages */
361 local_t write; /* index for next write */
362 unsigned read; /* index for next read */
363 local_t entries; /* entries on this page */
364 unsigned long real_end; /* real end of data */
365 unsigned order; /* order of the page */
366 u32 id:30; /* ID for external mapping */
367 u32 range:1; /* Mapped via a range */
368 struct buffer_data_page *page; /* Actual data page */
369 };
370
371 /*
372 * The buffer page counters, write and entries, must be reset
373 * atomically when crossing page boundaries. To synchronize this
374 * update, two counters are inserted into the number. One is
375 * the actual counter for the write position or count on the page.
376 *
377 * The other is a counter of updaters. Before an update happens
378 * the update partition of the counter is incremented. This will
379 * allow the updater to update the counter atomically.
380 *
381 * The counter is 20 bits, and the state data is 12.
382 */
383 #define RB_WRITE_MASK 0xfffff
384 #define RB_WRITE_INTCNT (1 << 20)
385
386 static void rb_init_page(struct buffer_data_page *bpage)
387 {
388 local_set(&bpage->commit, 0);
389 }
390
391 static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
392 {
393 return local_read(&bpage->page->commit);
394 }
395
396 static void free_buffer_page(struct buffer_page *bpage)
397 {
398 /* Range pages are not to be freed */
399 if (!bpage->range)
400 free_pages((unsigned long)bpage->page, bpage->order);
401 kfree(bpage);
402 }
403
404 /*
405 * We need to fit the time_stamp delta into 27 bits.
406 */
407 static inline bool test_time_stamp(u64 delta)
408 {
409 return !!(delta & TS_DELTA_TEST);
410 }
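/*
 * For example, with a nanosecond clock the 27-bit delta covers about
 * 2^27 ns ~= 134 ms. A gap between events larger than that makes
 * test_time_stamp() return true, forcing the writer to emit a time
 * extend (or an absolute time stamp) in front of the data event.
 */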
411
412 struct rb_irq_work {
413 struct irq_work work;
414 wait_queue_head_t waiters;
415 wait_queue_head_t full_waiters;
416 atomic_t seq;
417 bool waiters_pending;
418 bool full_waiters_pending;
419 bool wakeup_full;
420 };
421
422 /*
423 * Structure to hold event state and handle nested events.
424 */
425 struct rb_event_info {
426 u64 ts;
427 u64 delta;
428 u64 before;
429 u64 after;
430 unsigned long length;
431 struct buffer_page *tail_page;
432 int add_timestamp;
433 };
434
435 /*
436 * Used for the add_timestamp
437 * NONE
438 * EXTEND - wants a time extend
439 * ABSOLUTE - the buffer requests all events to have absolute time stamps
440 * FORCE - force a full time stamp.
441 */
442 enum {
443 RB_ADD_STAMP_NONE = 0,
444 RB_ADD_STAMP_EXTEND = BIT(1),
445 RB_ADD_STAMP_ABSOLUTE = BIT(2),
446 RB_ADD_STAMP_FORCE = BIT(3)
447 };
448 /*
449 * Used for which event context the event is in.
450 * TRANSITION = 0
451 * NMI = 1
452 * IRQ = 2
453 * SOFTIRQ = 3
454 * NORMAL = 4
455 *
456 * See trace_recursive_lock() comment below for more details.
457 */
458 enum {
459 RB_CTX_TRANSITION,
460 RB_CTX_NMI,
461 RB_CTX_IRQ,
462 RB_CTX_SOFTIRQ,
463 RB_CTX_NORMAL,
464 RB_CTX_MAX
465 };
466
467 struct rb_time_struct {
468 local64_t time;
469 };
470 typedef struct rb_time_struct rb_time_t;
471
472 #define MAX_NEST 5
473
474 /*
475 * head_page == tail_page && head == tail then buffer is empty.
476 */
477 struct ring_buffer_per_cpu {
478 int cpu;
479 atomic_t record_disabled;
480 atomic_t resize_disabled;
481 struct trace_buffer *buffer;
482 raw_spinlock_t reader_lock; /* serialize readers */
483 arch_spinlock_t lock;
484 struct lock_class_key lock_key;
485 struct buffer_data_page *free_page;
486 unsigned long nr_pages;
487 unsigned int current_context;
488 struct list_head *pages;
489 /* pages generation counter, incremented when the list changes */
490 unsigned long cnt;
491 struct buffer_page *head_page; /* read from head */
492 struct buffer_page *tail_page; /* write to tail */
493 struct buffer_page *commit_page; /* committed pages */
494 struct buffer_page *reader_page;
495 unsigned long lost_events;
496 unsigned long last_overrun;
497 unsigned long nest;
498 local_t entries_bytes;
499 local_t entries;
500 local_t overrun;
501 local_t commit_overrun;
502 local_t dropped_events;
503 local_t committing;
504 local_t commits;
505 local_t pages_touched;
506 local_t pages_lost;
507 local_t pages_read;
508 long last_pages_touch;
509 size_t shortest_full;
510 unsigned long read;
511 unsigned long read_bytes;
512 rb_time_t write_stamp;
513 rb_time_t before_stamp;
514 u64 event_stamp[MAX_NEST];
515 u64 read_stamp;
516 /* pages removed since last reset */
517 unsigned long pages_removed;
518
519 unsigned int mapped;
520 unsigned int user_mapped; /* user space mapping */
521 struct mutex mapping_lock;
522 unsigned long *subbuf_ids; /* ID to subbuf VA */
523 struct trace_buffer_meta *meta_page;
524 struct ring_buffer_cpu_meta *ring_meta;
525
526 /* ring buffer pages to update, > 0 to add, < 0 to remove */
527 long nr_pages_to_update;
528 struct list_head new_pages; /* new pages to add */
529 struct work_struct update_pages_work;
530 struct completion update_done;
531
532 struct rb_irq_work irq_work;
533 };
534
535 struct trace_buffer {
536 unsigned flags;
537 int cpus;
538 atomic_t record_disabled;
539 atomic_t resizing;
540 cpumask_var_t cpumask;
541
542 struct lock_class_key *reader_lock_key;
543
544 struct mutex mutex;
545
546 struct ring_buffer_per_cpu **buffers;
547
548 struct hlist_node node;
549 u64 (*clock)(void);
550
551 struct rb_irq_work irq_work;
552 bool time_stamp_abs;
553
554 unsigned long range_addr_start;
555 unsigned long range_addr_end;
556
557 struct ring_buffer_meta *meta;
558
559 unsigned int subbuf_size;
560 unsigned int subbuf_order;
561 unsigned int max_data_size;
562 };
563
564 struct ring_buffer_iter {
565 struct ring_buffer_per_cpu *cpu_buffer;
566 unsigned long head;
567 unsigned long next_event;
568 struct buffer_page *head_page;
569 struct buffer_page *cache_reader_page;
570 unsigned long cache_read;
571 unsigned long cache_pages_removed;
572 u64 read_stamp;
573 u64 page_stamp;
574 struct ring_buffer_event *event;
575 size_t event_size;
576 int missed_events;
577 };
578
579 int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
580 {
581 struct buffer_data_page field;
582
583 trace_seq_printf(s, "\tfield: u64 timestamp;\t"
584 "offset:0;\tsize:%u;\tsigned:%u;\n",
585 (unsigned int)sizeof(field.time_stamp),
586 (unsigned int)is_signed_type(u64));
587
588 trace_seq_printf(s, "\tfield: local_t commit;\t"
589 "offset:%u;\tsize:%u;\tsigned:%u;\n",
590 (unsigned int)offsetof(typeof(field), commit),
591 (unsigned int)sizeof(field.commit),
592 (unsigned int)is_signed_type(long));
593
594 trace_seq_printf(s, "\tfield: int overwrite;\t"
595 "offset:%u;\tsize:%u;\tsigned:%u;\n",
596 (unsigned int)offsetof(typeof(field), commit),
597 1,
598 (unsigned int)is_signed_type(long));
599
600 trace_seq_printf(s, "\tfield: char data;\t"
601 "offset:%u;\tsize:%u;\tsigned:%u;\n",
602 (unsigned int)offsetof(typeof(field), data),
603 (unsigned int)buffer->subbuf_size,
604 (unsigned int)is_signed_type(char));
605
606 return !trace_seq_has_overflowed(s);
607 }
608
609 static inline void rb_time_read(rb_time_t *t, u64 *ret)
610 {
611 *ret = local64_read(&t->time);
612 }
613 static void rb_time_set(rb_time_t *t, u64 val)
614 {
615 local64_set(&t->time, val);
616 }
617
618 /*
619 * Enable this to make sure that the event passed to
620 * ring_buffer_event_time_stamp() is not committed and also
621 * is on the buffer that was passed in.
622 */
623 //#define RB_VERIFY_EVENT
624 #ifdef RB_VERIFY_EVENT
625 static struct list_head *rb_list_head(struct list_head *list);
626 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
627 void *event)
628 {
629 struct buffer_page *page = cpu_buffer->commit_page;
630 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
631 struct list_head *next;
632 long commit, write;
633 unsigned long addr = (unsigned long)event;
634 bool done = false;
635 int stop = 0;
636
637 /* Make sure the event exists and is not committed yet */
638 do {
639 if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
640 done = true;
641 commit = local_read(&page->page->commit);
642 write = local_read(&page->write);
643 if (addr >= (unsigned long)&page->page->data[commit] &&
644 addr < (unsigned long)&page->page->data[write])
645 return;
646
647 next = rb_list_head(page->list.next);
648 page = list_entry(next, struct buffer_page, list);
649 } while (!done);
650 WARN_ON_ONCE(1);
651 }
652 #else
653 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
654 void *event)
655 {
656 }
657 #endif
658
659 /*
660 * The absolute time stamp drops the 5 MSBs and some clocks may
661 * require them. The rb_fix_abs_ts() will take a previous full
662 * time stamp, and add the 5 MSB of that time stamp on to the
663 * saved absolute time stamp. Then they are compared in case of
664 * the unlikely event that the latest time stamp incremented
665 * the 5 MSB.
666 */
667 static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
668 {
669 if (save_ts & TS_MSB) {
670 abs |= save_ts & TS_MSB;
671 /* Check for overflow */
672 if (unlikely(abs < save_ts))
673 abs += 1ULL << 59;
674 }
675 return abs;
676 }
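/*
 * A worked example of the fix-up above (values are illustrative):
 * if the previous full time stamp was 0x0800000000001000 (bit 59 set)
 * and the event carries the truncated 59-bit value 0x2000, then
 * rb_fix_abs_ts() ORs the saved 5 MSBs back in and returns
 * 0x0800000000002000. If the result were smaller than the saved
 * stamp, the 59-bit portion must have wrapped, so 1ULL << 59 is
 * added once to keep the result monotonic.
 */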
677
678 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
679
680 /**
681 * ring_buffer_event_time_stamp - return the event's current time stamp
682 * @buffer: The buffer that the event is on
683 * @event: the event to get the time stamp of
684 *
685 * Note, this must be called after @event is reserved, and before it is
686 * committed to the ring buffer. And must be called from the same
687 * context where the event was reserved (normal, softirq, irq, etc).
688 *
689 * Returns the time stamp associated with the current event.
690 * If the event has an extended time stamp, then that is used as
691 * the time stamp to return.
692 * In the highly unlikely case that the event was nested more than
693 * the max nesting, then the write_stamp of the buffer is returned;
694 * otherwise the current time is returned. Really, neither of
695 * the last two cases should ever happen.
696 */
697 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
698 struct ring_buffer_event *event)
699 {
700 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
701 unsigned int nest;
702 u64 ts;
703
704 /* If the event includes an absolute time, then just use that */
705 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
706 ts = rb_event_time_stamp(event);
707 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
708 }
709
710 nest = local_read(&cpu_buffer->committing);
711 verify_event(cpu_buffer, event);
712 if (WARN_ON_ONCE(!nest))
713 goto fail;
714
715 /* Read the current saved nesting level time stamp */
716 if (likely(--nest < MAX_NEST))
717 return cpu_buffer->event_stamp[nest];
718
719 /* Shouldn't happen, warn if it does */
720 WARN_ONCE(1, "nest (%d) greater than max", nest);
721
722 fail:
723 rb_time_read(&cpu_buffer->write_stamp, &ts);
724
725 return ts;
726 }
727
728 /**
729 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
730 * @buffer: The ring_buffer to get the number of pages from
731 * @cpu: The cpu of the ring_buffer to get the number of pages from
732 *
733 * Returns the number of pages that have content in the ring buffer.
734 */
735 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
736 {
737 size_t read;
738 size_t lost;
739 size_t cnt;
740
741 read = local_read(&buffer->buffers[cpu]->pages_read);
742 lost = local_read(&buffer->buffers[cpu]->pages_lost);
743 cnt = local_read(&buffer->buffers[cpu]->pages_touched);
744
745 if (WARN_ON_ONCE(cnt < lost))
746 return 0;
747
748 cnt -= lost;
749
750 /* The reader can read an empty page, but not more than that */
751 if (cnt < read) {
752 WARN_ON_ONCE(read > cnt + 1);
753 return 0;
754 }
755
756 return cnt - read;
757 }
758
759 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
760 {
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
762 size_t nr_pages;
763 size_t dirty;
764
765 nr_pages = cpu_buffer->nr_pages;
766 if (!nr_pages || !full)
767 return true;
768
769 /*
770 * Add one as dirty will never equal nr_pages, as the sub-buffer
771 * that the writer is on is not counted as dirty.
772 * This is needed if "buffer_percent" is set to 100.
773 */
774 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
775
776 return (dirty * 100) >= (full * nr_pages);
777 }
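/*
 * Worked example of the check above (numbers are illustrative): with
 * nr_pages = 8 and full = 50 (wake readers at 50%), full_hit() is true
 * once (dirty + 1) * 100 >= 50 * 8, i.e. once at least three
 * sub-buffers other than the one the writer is currently on contain
 * data.
 */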
778
779 /*
780 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
781 *
782 * Schedules a delayed work to wake up any task that is blocked on the
783 * ring buffer waiters queue.
784 */
785 static void rb_wake_up_waiters(struct irq_work *work)
786 {
787 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
788
789 /* For waiters waiting for the first wake up */
790 (void)atomic_fetch_inc_release(&rbwork->seq);
791
792 wake_up_all(&rbwork->waiters);
793 if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
794 /* Only cpu_buffer sets the above flags */
795 struct ring_buffer_per_cpu *cpu_buffer =
796 container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
797
798 /* Called from interrupt context */
799 raw_spin_lock(&cpu_buffer->reader_lock);
800 rbwork->wakeup_full = false;
801 rbwork->full_waiters_pending = false;
802
803 /* Waking up all waiters, they will reset the shortest full */
804 cpu_buffer->shortest_full = 0;
805 raw_spin_unlock(&cpu_buffer->reader_lock);
806
807 wake_up_all(&rbwork->full_waiters);
808 }
809 }
810
811 /**
812 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
813 * @buffer: The ring buffer to wake waiters on
814 * @cpu: The CPU buffer to wake waiters on
815 *
816 * When a file that represents a ring buffer is closing,
817 * it is prudent to wake up any waiters that are on it.
818 */
819 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
820 {
821 struct ring_buffer_per_cpu *cpu_buffer;
822 struct rb_irq_work *rbwork;
823
824 if (!buffer)
825 return;
826
827 if (cpu == RING_BUFFER_ALL_CPUS) {
828
829 /* Wake up individual ones too. One level recursion */
830 for_each_buffer_cpu(buffer, cpu)
831 ring_buffer_wake_waiters(buffer, cpu);
832
833 rbwork = &buffer->irq_work;
834 } else {
835 if (WARN_ON_ONCE(!buffer->buffers))
836 return;
837 if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
838 return;
839
840 cpu_buffer = buffer->buffers[cpu];
841 /* The CPU buffer may not have been initialized yet */
842 if (!cpu_buffer)
843 return;
844 rbwork = &cpu_buffer->irq_work;
845 }
846
847 /* This can be called in any context */
848 irq_work_queue(&rbwork->work);
849 }
850
851 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
852 {
853 struct ring_buffer_per_cpu *cpu_buffer;
854 bool ret = false;
855
856 /* A read of all CPUs always waits for any data */
857 if (cpu == RING_BUFFER_ALL_CPUS)
858 return !ring_buffer_empty(buffer);
859
860 cpu_buffer = buffer->buffers[cpu];
861
862 if (!ring_buffer_empty_cpu(buffer, cpu)) {
863 unsigned long flags;
864 bool pagebusy;
865
866 if (!full)
867 return true;
868
869 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
870 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
871 ret = !pagebusy && full_hit(buffer, cpu, full);
872
873 if (!ret && (!cpu_buffer->shortest_full ||
874 cpu_buffer->shortest_full > full)) {
875 cpu_buffer->shortest_full = full;
876 }
877 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
878 }
879 return ret;
880 }
881
882 static inline bool
883 rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
884 int cpu, int full, ring_buffer_cond_fn cond, void *data)
885 {
886 if (rb_watermark_hit(buffer, cpu, full))
887 return true;
888
889 if (cond(data))
890 return true;
891
892 /*
893 * The events can happen in critical sections where
894 * checking a work queue can cause deadlocks.
895 * After adding a task to the queue, this flag is set
896 * only to notify events to try to wake up the queue
897 * using irq_work.
898 *
899 * We don't clear it even if the buffer is no longer
900 * empty. The flag only causes the next event to run
901 * irq_work to do the work queue wake up. The worst
902 * that can happen if we race with !trace_empty() is that
903 * an event will cause an irq_work to try to wake up
904 * an empty queue.
905 *
906 * There's no reason to protect this flag either, as
907 * the work queue and irq_work logic will do the necessary
908 * synchronization for the wake ups. The only thing
909 * that is necessary is that the wake up happens after
910 * a task has been queued. It's OK for spurious wake ups.
911 */
912 if (full)
913 rbwork->full_waiters_pending = true;
914 else
915 rbwork->waiters_pending = true;
916
917 return false;
918 }
919
920 struct rb_wait_data {
921 struct rb_irq_work *irq_work;
922 int seq;
923 };
924
925 /*
926 * The default wait condition for ring_buffer_wait() is to just exit the
927 * wait loop the first time it is woken up.
928 */
929 static bool rb_wait_once(void *data)
930 {
931 struct rb_wait_data *rdata = data;
932 struct rb_irq_work *rbwork = rdata->irq_work;
933
934 return atomic_read_acquire(&rbwork->seq) != rdata->seq;
935 }
936
937 /**
938 * ring_buffer_wait - wait for input to the ring buffer
939 * @buffer: buffer to wait on
940 * @cpu: the cpu buffer to wait on
941 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
942 * @cond: condition function to break out of wait (NULL to run once)
943 * @data: the data to pass to @cond.
944 *
945 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
946 * as data is added to any of the @buffer's cpu buffers. Otherwise
947 * it will wait for data to be added to a specific cpu buffer.
948 */
949 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
950 ring_buffer_cond_fn cond, void *data)
951 {
952 struct ring_buffer_per_cpu *cpu_buffer;
953 struct wait_queue_head *waitq;
954 struct rb_irq_work *rbwork;
955 struct rb_wait_data rdata;
956 int ret = 0;
957
958 /*
959 * Depending on what the caller is waiting for, either any
960 * data in any cpu buffer, or a specific buffer, put the
961 * caller on the appropriate wait queue.
962 */
963 if (cpu == RING_BUFFER_ALL_CPUS) {
964 rbwork = &buffer->irq_work;
965 /* Full only makes sense on per cpu reads */
966 full = 0;
967 } else {
968 if (!cpumask_test_cpu(cpu, buffer->cpumask))
969 return -ENODEV;
970 cpu_buffer = buffer->buffers[cpu];
971 rbwork = &cpu_buffer->irq_work;
972 }
973
974 if (full)
975 waitq = &rbwork->full_waiters;
976 else
977 waitq = &rbwork->waiters;
978
979 /* Set up to exit loop as soon as it is woken */
980 if (!cond) {
981 cond = rb_wait_once;
982 rdata.irq_work = rbwork;
983 rdata.seq = atomic_read_acquire(&rbwork->seq);
984 data = &rdata;
985 }
986
987 ret = wait_event_interruptible((*waitq),
988 rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
989
990 return ret;
991 }
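/*
 * A minimal blocking-reader sketch using the function above
 * (illustrative only; declarations and error handling are trimmed):
 *
 *	ret = ring_buffer_wait(buffer, cpu, 0, NULL, NULL);
 *	if (!ret)
 *		event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *
 * Passing a NULL @cond falls back to rb_wait_once(), so the call
 * returns after the first wake up even if no new data arrived (for
 * instance when waiters are flushed by ring_buffer_wake_waiters()).
 */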
992
993 /**
994 * ring_buffer_poll_wait - poll on buffer input
995 * @buffer: buffer to wait on
996 * @cpu: the cpu buffer to wait on
997 * @filp: the file descriptor
998 * @poll_table: The poll descriptor
999 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
1000 *
1001 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1002 * as data is added to any of the @buffer's cpu buffers. Otherwise
1003 * it will wait for data to be added to a specific cpu buffer.
1004 *
1005 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
1006 * zero otherwise.
1007 */
1008 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
1009 struct file *filp, poll_table *poll_table, int full)
1010 {
1011 struct ring_buffer_per_cpu *cpu_buffer;
1012 struct rb_irq_work *rbwork;
1013
1014 if (cpu == RING_BUFFER_ALL_CPUS) {
1015 rbwork = &buffer->irq_work;
1016 full = 0;
1017 } else {
1018 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1019 return EPOLLERR;
1020
1021 cpu_buffer = buffer->buffers[cpu];
1022 rbwork = &cpu_buffer->irq_work;
1023 }
1024
1025 if (full) {
1026 poll_wait(filp, &rbwork->full_waiters, poll_table);
1027
1028 if (rb_watermark_hit(buffer, cpu, full))
1029 return EPOLLIN | EPOLLRDNORM;
1030 /*
1031 * Only allow full_waiters_pending update to be seen after
1032 * the shortest_full is set (in rb_watermark_hit). If the
1033 * writer sees the full_waiters_pending flag set, it will
1034 * compare the amount in the ring buffer to shortest_full.
1035 * If the amount in the ring buffer is greater than the
1036 * shortest_full percent, it will call the irq_work handler
1037 * to wake up this list. The irq_handler will reset shortest_full
1038 * back to zero. That's done under the reader_lock, but
1039 * the below smp_mb() makes sure that the update to
1040 * full_waiters_pending doesn't leak up into the above.
1041 */
1042 smp_mb();
1043 rbwork->full_waiters_pending = true;
1044 return 0;
1045 }
1046
1047 poll_wait(filp, &rbwork->waiters, poll_table);
1048 rbwork->waiters_pending = true;
1049
1050 /*
1051 * There's a tight race between setting the waiters_pending and
1052 * checking if the ring buffer is empty. Once the waiters_pending bit
1053 * is set, the next event will wake the task up, but we can get stuck
1054 * if there's only a single event in.
1055 *
1056 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1057 * but adding a memory barrier to all events will cause too much of a
1058 * performance hit in the fast path. We only need a memory barrier when
1059 * the buffer goes from empty to having content. But as this race is
1060 * extremely small, and it's not a problem if another event comes in, we
1061 * will fix it later.
1062 */
1063 smp_mb();
1064
1065 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1066 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1067 return EPOLLIN | EPOLLRDNORM;
1068 return 0;
1069 }
1070
1071 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1072 #define RB_WARN_ON(b, cond) \
1073 ({ \
1074 int _____ret = unlikely(cond); \
1075 if (_____ret) { \
1076 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1077 struct ring_buffer_per_cpu *__b = \
1078 (void *)b; \
1079 atomic_inc(&__b->buffer->record_disabled); \
1080 } else \
1081 atomic_inc(&b->record_disabled); \
1082 WARN_ON(1); \
1083 } \
1084 _____ret; \
1085 })
1086
1087 /* Up this if you want to test the TIME_EXTENTS and normalization */
1088 #define DEBUG_SHIFT 0
1089
1090 static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1091 {
1092 u64 ts;
1093
1094 /* Skip retpolines :-( */
1095 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1096 ts = trace_clock_local();
1097 else
1098 ts = buffer->clock();
1099
1100 /* shift to debug/test normalization and TIME_EXTENTS */
1101 return ts << DEBUG_SHIFT;
1102 }
1103
1104 u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1105 {
1106 u64 time;
1107
1108 preempt_disable_notrace();
1109 time = rb_time_stamp(buffer);
1110 preempt_enable_notrace();
1111
1112 return time;
1113 }
1114 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1115
1116 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1117 int cpu, u64 *ts)
1118 {
1119 /* Just stupid testing the normalize function and deltas */
1120 *ts >>= DEBUG_SHIFT;
1121 }
1122 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1123
1124 /*
1125 * Making the ring buffer lockless makes things tricky.
1126 * Writes only happen on the CPU that they are on, and they
1127 * only need to worry about interrupts, but reads can
1128 * happen on any CPU.
1129 *
1130 * The reader page is always off the ring buffer, but when the
1131 * reader finishes with a page, it needs to swap its page with
1132 * a new one from the buffer. The reader needs to take from
1133 * the head (writes go to the tail). But if a writer is in overwrite
1134 * mode and wraps, it must push the head page forward.
1135 *
1136 * Here lies the problem.
1137 *
1138 * The reader must be careful to replace only the head page, and
1139 * not another one. As described at the top of the file in the
1140 * ASCII art, the reader sets its old page to point to the next
1141 * page after head. It then sets the page after head to point to
1142 * the old reader page. But if the writer moves the head page
1143 * during this operation, the reader could end up with the tail.
1144 *
1145 * We use cmpxchg to help prevent this race. We also do something
1146 * special with the page before head. We set the LSB to 1.
1147 *
1148 * When the writer must push the page forward, it will clear the
1149 * bit that points to the head page, move the head, and then set
1150 * the bit that points to the new head page.
1151 *
1152 * We also don't want an interrupt coming in and moving the head
1153 * page on another writer. Thus we use the second LSB to catch
1154 * that too. Thus:
1155 *
1156 * head->list->prev->next bit 1 bit 0
1157 * ------- -------
1158 * Normal page 0 0
1159 * Points to head page 0 1
1160 * New head page 1 0
1161 *
1162 * Note we can not trust the prev pointer of the head page, because:
1163 *
1164 * +----+ +-----+ +-----+
1165 * | |------>| T |---X--->| N |
1166 * | |<------| | | |
1167 * +----+ +-----+ +-----+
1168 * ^ ^ |
1169 * | +-----+ | |
1170 * +----------| R |----------+ |
1171 * | |<-----------+
1172 * +-----+
1173 *
1174 * Key: ---X--> HEAD flag set in pointer
1175 * T Tail page
1176 * R Reader page
1177 * N Next page
1178 *
1179 * (see __rb_reserve_next() to see where this happens)
1180 *
1181 * What the above shows is that the reader just swapped out
1182 * the reader page with a page in the buffer, but before it
1183 * could make the new header point back to the new page added
1184 * it was preempted by a writer. The writer moved forward onto
1185 * the new page added by the reader and is about to move forward
1186 * again.
1187 *
1188 * You can see, it is legitimate for the previous pointer of
1189 * the head (or any page) not to point back to itself. But only
1190 * temporarily.
1191 */
1192
1193 #define RB_PAGE_NORMAL 0UL
1194 #define RB_PAGE_HEAD 1UL
1195 #define RB_PAGE_UPDATE 2UL
1196
1197
1198 #define RB_FLAG_MASK 3UL
1199
1200 /* PAGE_MOVED is not part of the mask */
1201 #define RB_PAGE_MOVED 4UL
1202
1203 /*
1204 * rb_list_head - remove any bit
1205 */
1206 static struct list_head *rb_list_head(struct list_head *list)
1207 {
1208 unsigned long val = (unsigned long)list;
1209
1210 return (struct list_head *)(val & ~RB_FLAG_MASK);
1211 }
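/*
 * Example of the flag encoding (the pointer value is illustrative):
 * if a buffer page sits at 0xffff888012345600, a ->next value of
 * 0xffff888012345601 means "the page this points to is the head page"
 * (RB_PAGE_HEAD), and rb_list_head() masks off the low two bits to
 * recover the real list_head address 0xffff888012345600.
 */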
1212
1213 /*
1214 * rb_is_head_page - test if the given page is the head page
1215 *
1216 * Because the reader may move the head_page pointer, we can
1217 * not trust what the head page is (it may be pointing to
1218 * the reader page). But if the next page is a header page,
1219 * its flags will be non zero.
1220 */
1221 static inline int
1222 rb_is_head_page(struct buffer_page *page, struct list_head *list)
1223 {
1224 unsigned long val;
1225
1226 val = (unsigned long)list->next;
1227
1228 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1229 return RB_PAGE_MOVED;
1230
1231 return val & RB_FLAG_MASK;
1232 }
1233
1234 /*
1235 * rb_is_reader_page
1236 *
1237 * The unique thing about the reader page, is that, if the
1238 * writer is ever on it, the previous pointer never points
1239 * back to the reader page.
1240 */
1241 static bool rb_is_reader_page(struct buffer_page *page)
1242 {
1243 struct list_head *list = page->list.prev;
1244
1245 return rb_list_head(list->next) != &page->list;
1246 }
1247
1248 /*
1249 * rb_set_list_to_head - set a list_head to be pointing to head.
1250 */
1251 static void rb_set_list_to_head(struct list_head *list)
1252 {
1253 unsigned long *ptr;
1254
1255 ptr = (unsigned long *)&list->next;
1256 *ptr |= RB_PAGE_HEAD;
1257 *ptr &= ~RB_PAGE_UPDATE;
1258 }
1259
1260 /*
1261 * rb_head_page_activate - sets up head page
1262 */
1263 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1264 {
1265 struct buffer_page *head;
1266
1267 head = cpu_buffer->head_page;
1268 if (!head)
1269 return;
1270
1271 /*
1272 * Set the previous list pointer to have the HEAD flag.
1273 */
1274 rb_set_list_to_head(head->list.prev);
1275
1276 if (cpu_buffer->ring_meta) {
1277 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
1278 meta->head_buffer = (unsigned long)head->page;
1279 }
1280 }
1281
1282 static void rb_list_head_clear(struct list_head *list)
1283 {
1284 unsigned long *ptr = (unsigned long *)&list->next;
1285
1286 *ptr &= ~RB_FLAG_MASK;
1287 }
1288
1289 /*
1290 * rb_head_page_deactivate - clears head page ptr (for free list)
1291 */
1292 static void
1293 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1294 {
1295 struct list_head *hd;
1296
1297 /* Go through the whole list and clear any pointers found. */
1298 rb_list_head_clear(cpu_buffer->pages);
1299
1300 list_for_each(hd, cpu_buffer->pages)
1301 rb_list_head_clear(hd);
1302 }
1303
1304 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1305 struct buffer_page *head,
1306 struct buffer_page *prev,
1307 int old_flag, int new_flag)
1308 {
1309 struct list_head *list;
1310 unsigned long val = (unsigned long)&head->list;
1311 unsigned long ret;
1312
1313 list = &prev->list;
1314
1315 val &= ~RB_FLAG_MASK;
1316
1317 ret = cmpxchg((unsigned long *)&list->next,
1318 val | old_flag, val | new_flag);
1319
1320 /* check if the reader took the page */
1321 if ((ret & ~RB_FLAG_MASK) != val)
1322 return RB_PAGE_MOVED;
1323
1324 return ret & RB_FLAG_MASK;
1325 }
1326
1327 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1328 struct buffer_page *head,
1329 struct buffer_page *prev,
1330 int old_flag)
1331 {
1332 return rb_head_page_set(cpu_buffer, head, prev,
1333 old_flag, RB_PAGE_UPDATE);
1334 }
1335
1336 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1337 struct buffer_page *head,
1338 struct buffer_page *prev,
1339 int old_flag)
1340 {
1341 return rb_head_page_set(cpu_buffer, head, prev,
1342 old_flag, RB_PAGE_HEAD);
1343 }
1344
1345 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1346 struct buffer_page *head,
1347 struct buffer_page *prev,
1348 int old_flag)
1349 {
1350 return rb_head_page_set(cpu_buffer, head, prev,
1351 old_flag, RB_PAGE_NORMAL);
1352 }
1353
1354 static inline void rb_inc_page(struct buffer_page **bpage)
1355 {
1356 struct list_head *p = rb_list_head((*bpage)->list.next);
1357
1358 *bpage = list_entry(p, struct buffer_page, list);
1359 }
1360
1361 static struct buffer_page *
1362 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1363 {
1364 struct buffer_page *head;
1365 struct buffer_page *page;
1366 struct list_head *list;
1367 int i;
1368
1369 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1370 return NULL;
1371
1372 /* sanity check */
1373 list = cpu_buffer->pages;
1374 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1375 return NULL;
1376
1377 page = head = cpu_buffer->head_page;
1378 /*
1379 * It is possible that the writer moves the header behind
1380 * where we started, and we miss in one loop.
1381 * A second loop should grab the header, but we'll do
1382 * three loops just because I'm paranoid.
1383 */
1384 for (i = 0; i < 3; i++) {
1385 do {
1386 if (rb_is_head_page(page, page->list.prev)) {
1387 cpu_buffer->head_page = page;
1388 return page;
1389 }
1390 rb_inc_page(&page);
1391 } while (page != head);
1392 }
1393
1394 RB_WARN_ON(cpu_buffer, 1);
1395
1396 return NULL;
1397 }
1398
1399 static bool rb_head_page_replace(struct buffer_page *old,
1400 struct buffer_page *new)
1401 {
1402 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1403 unsigned long val;
1404
1405 val = *ptr & ~RB_FLAG_MASK;
1406 val |= RB_PAGE_HEAD;
1407
1408 return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
1409 }
1410
1411 /*
1412 * rb_tail_page_update - move the tail page forward
1413 */
1414 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1415 struct buffer_page *tail_page,
1416 struct buffer_page *next_page)
1417 {
1418 unsigned long old_entries;
1419 unsigned long old_write;
1420
1421 /*
1422 * The tail page now needs to be moved forward.
1423 *
1424 * We need to reset the tail page, but without messing
1425 * with possible erasing of data brought in by interrupts
1426 * that have moved the tail page and are currently on it.
1427 *
1428 * We add a counter to the write field to denote this.
1429 */
1430 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1431 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1432
1433 /*
1434 * Just make sure we have seen our old_write and synchronize
1435 * with any interrupts that come in.
1436 */
1437 barrier();
1438
1439 /*
1440 * If the tail page is still the same as what we think
1441 * it is, then it is up to us to update the tail
1442 * pointer.
1443 */
1444 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1445 /* Zero the write counter */
1446 unsigned long val = old_write & ~RB_WRITE_MASK;
1447 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1448
1449 /*
1450 * This will only succeed if an interrupt did
1451 * not come in and change it. In which case, we
1452 * do not want to modify it.
1453 *
1454 * We add (void) to let the compiler know that we do not care
1455 * about the return value of these functions. We use the
1456 * cmpxchg to only update if an interrupt did not already
1457 * do it for us. If the cmpxchg fails, we don't care.
1458 */
1459 (void)local_cmpxchg(&next_page->write, old_write, val);
1460 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1461
1462 /*
1463 * No need to worry about races with clearing out the commit.
1464 * it only can increment when a commit takes place. But that
1465 * only happens in the outer most nested commit.
1466 */
1467 local_set(&next_page->page->commit, 0);
1468
1469 /* Either we update tail_page or an interrupt does */
1470 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
1471 local_inc(&cpu_buffer->pages_touched);
1472 }
1473 }
1474
1475 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1476 struct buffer_page *bpage)
1477 {
1478 unsigned long val = (unsigned long)bpage;
1479
1480 RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1481 }
1482
1483 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer,
1484 struct list_head *list)
1485 {
1486 if (RB_WARN_ON(cpu_buffer,
1487 rb_list_head(rb_list_head(list->next)->prev) != list))
1488 return false;
1489
1490 if (RB_WARN_ON(cpu_buffer,
1491 rb_list_head(rb_list_head(list->prev)->next) != list))
1492 return false;
1493
1494 return true;
1495 }
1496
1497 /**
1498 * rb_check_pages - integrity check of buffer pages
1499 * @cpu_buffer: CPU buffer with pages to test
1500 *
1501 * As a safety measure we check to make sure the data pages have not
1502 * been corrupted.
1503 */
1504 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1505 {
1506 struct list_head *head, *tmp;
1507 unsigned long buffer_cnt;
1508 unsigned long flags;
1509 int nr_loops = 0;
1510
1511 /*
1512 * Walk the linked list underpinning the ring buffer and validate all
1513 * its next and prev links.
1514 *
1515 * The check acquires the reader_lock to avoid concurrent processing
1516 * with code that could be modifying the list. However, the lock cannot
1517 * be held for the entire duration of the walk, as this would make the
1518 * time when interrupts are disabled non-deterministic, dependent on the
1519 * ring buffer size. Therefore, the code releases and re-acquires the
1520 * lock after checking each page. The ring_buffer_per_cpu.cnt variable
1521 * is then used to detect if the list was modified while the lock was
1522 * not held, in which case the check needs to be restarted.
1523 *
1524 * The code attempts to perform the check at most three times before
1525 * giving up. This is acceptable because this is only a self-validation
1526 * to detect problems early on. In practice, the list modification
1527 * operations are fairly spaced, and so this check typically succeeds at
1528 * most on the second try.
1529 */
1530 again:
1531 if (++nr_loops > 3)
1532 return;
1533
1534 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1535 head = rb_list_head(cpu_buffer->pages);
1536 if (!rb_check_links(cpu_buffer, head))
1537 goto out_locked;
1538 buffer_cnt = cpu_buffer->cnt;
1539 tmp = head;
1540 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1541
1542 while (true) {
1543 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1544
1545 if (buffer_cnt != cpu_buffer->cnt) {
1546 /* The list was updated, try again. */
1547 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1548 goto again;
1549 }
1550
1551 tmp = rb_list_head(tmp->next);
1552 if (tmp == head)
1553 /* The iteration circled back, all is done. */
1554 goto out_locked;
1555
1556 if (!rb_check_links(cpu_buffer, tmp))
1557 goto out_locked;
1558
1559 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1560 }
1561
1562 out_locked:
1563 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1564 }
1565
1566 /*
1567 * Take an address, add the meta data size as well as the array of
1568 * subbuffer indexes, then align it to a subbuffer size.
1569 *
1570 * This is used to help find the next per cpu subbuffer within a mapped range.
1571 */
1572 static unsigned long
1573 rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs)
1574 {
1575 addr += sizeof(struct ring_buffer_cpu_meta) +
1576 sizeof(int) * nr_subbufs;
1577 return ALIGN(addr, subbuf_size);
1578 }
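/*
 * Worked example of the layout above (illustrative numbers): with a
 * 4096-byte sub-buffer and nr_subbufs = 9 (8 data pages plus the
 * reader page), the first sub-buffer of a chunk starting at @addr
 * begins at ALIGN(addr + sizeof(struct ring_buffer_cpu_meta) +
 * 9 * sizeof(int), 4096), i.e. rounded up to the next 4 KB boundary
 * past the per-CPU meta data and its buffers[] index array.
 */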
1579
1580 /*
1581 * Return the ring_buffer_meta for a given @cpu.
1582 */
1583 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
1584 {
1585 int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
1586 struct ring_buffer_cpu_meta *meta;
1587 struct ring_buffer_meta *bmeta;
1588 unsigned long ptr;
1589 int nr_subbufs;
1590
1591 bmeta = buffer->meta;
1592 if (!bmeta)
1593 return NULL;
1594
1595 ptr = (unsigned long)bmeta + bmeta->buffers_offset;
1596 meta = (struct ring_buffer_cpu_meta *)ptr;
1597
1598 /* When nr_pages passed in is zero, the first meta has already been initialized */
1599 if (!nr_pages) {
1600 nr_subbufs = meta->nr_subbufs;
1601 } else {
1602 /* Include the reader page */
1603 nr_subbufs = nr_pages + 1;
1604 }
1605
1606 /*
1607 * The first chunk may not be subbuffer aligned, whereas
1608 * the rest of the chunks are.
1609 */
1610 if (cpu) {
1611 ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1612 ptr += subbuf_size * nr_subbufs;
1613
1614 /* We can use multiplication to find chunks greater than 1 */
1615 if (cpu > 1) {
1616 unsigned long size;
1617 unsigned long p;
1618
1619 /* Save the beginning of this CPU chunk */
1620 p = ptr;
1621 ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1622 ptr += subbuf_size * nr_subbufs;
1623
1624 /* Now all chunks after this are the same size */
1625 size = ptr - p;
1626 ptr += size * (cpu - 2);
1627 }
1628 }
1629 return (void *)ptr;
1630 }
1631
1632 /* Return the start of subbufs given the meta pointer */
1633 static void *rb_subbufs_from_meta(struct ring_buffer_cpu_meta *meta)
1634 {
1635 int subbuf_size = meta->subbuf_size;
1636 unsigned long ptr;
1637
1638 ptr = (unsigned long)meta;
1639 ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs);
1640
1641 return (void *)ptr;
1642 }
1643
1644 /*
1645 * Return a specific sub-buffer for a given @cpu defined by @idx.
1646 */
1647 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
1648 {
1649 struct ring_buffer_cpu_meta *meta;
1650 unsigned long ptr;
1651 int subbuf_size;
1652
1653 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
1654 if (!meta)
1655 return NULL;
1656
1657 if (WARN_ON_ONCE(idx >= meta->nr_subbufs))
1658 return NULL;
1659
1660 subbuf_size = meta->subbuf_size;
1661
1662 /* Map this buffer to the order that's in meta->buffers[] */
1663 idx = meta->buffers[idx];
1664
1665 ptr = (unsigned long)rb_subbufs_from_meta(meta);
1666
1667 ptr += subbuf_size * idx;
1668 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end)
1669 return NULL;
1670
1671 return (void *)ptr;
1672 }
1673
1674 /*
1675 * See if the existing memory contains a valid meta section.
1676 * If so, use that, otherwise initialize it.
1677 */
1678 static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
1679 {
1680 unsigned long ptr = buffer->range_addr_start;
1681 struct ring_buffer_meta *bmeta;
1682 unsigned long total_size;
1683 int struct_sizes;
1684
1685 bmeta = (struct ring_buffer_meta *)ptr;
1686 buffer->meta = bmeta;
1687
1688 total_size = buffer->range_addr_end - buffer->range_addr_start;
1689
1690 struct_sizes = sizeof(struct ring_buffer_cpu_meta);
1691 struct_sizes |= sizeof(*bmeta) << 16;
1692
1693 /* The first buffer will start word size after the meta page */
1694 ptr += sizeof(*bmeta);
1695 ptr = ALIGN(ptr, sizeof(long));
1696 ptr += scratch_size;
1697
1698 if (bmeta->magic != RING_BUFFER_META_MAGIC) {
1699 pr_info("Ring buffer boot meta mismatch of magic\n");
1700 goto init;
1701 }
1702
1703 if (bmeta->struct_sizes != struct_sizes) {
1704 pr_info("Ring buffer boot meta mismatch of struct size\n");
1705 goto init;
1706 }
1707
1708 if (bmeta->total_size != total_size) {
1709 pr_info("Ring buffer boot meta mismatch of total size\n");
1710 goto init;
1711 }
1712
1713 if (bmeta->buffers_offset > bmeta->total_size) {
1714 pr_info("Ring buffer boot meta mismatch of offset outside of total size\n");
1715 goto init;
1716 }
1717
1718 if (bmeta->buffers_offset != (void *)ptr - (void *)bmeta) {
1719 pr_info("Ring buffer boot meta mismatch of first buffer offset\n");
1720 goto init;
1721 }
1722
1723 return true;
1724
1725 init:
1726 bmeta->magic = RING_BUFFER_META_MAGIC;
1727 bmeta->struct_sizes = struct_sizes;
1728 bmeta->total_size = total_size;
1729 bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
1730
1731 /* Zero out the scratch pad */
1732 memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
1733
1734 return false;
1735 }
1736
1737 /*
1738 * See if the existing memory contains valid ring buffer data.
1739 * As the previous kernel must be the same as this kernel, all
1740 * the calculations (size of buffers and number of buffers)
1741 * must be the same.
1742 */
1743 static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu,
1744 struct trace_buffer *buffer, int nr_pages,
1745 unsigned long *subbuf_mask)
1746 {
1747 int subbuf_size = PAGE_SIZE;
1748 struct buffer_data_page *subbuf;
1749 unsigned long buffers_start;
1750 unsigned long buffers_end;
1751 int i;
1752
1753 if (!subbuf_mask)
1754 return false;
1755
1756 buffers_start = meta->first_buffer;
1757 buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs);
1758
1759 /* Are the head and commit buffers within the range of buffers? */
1760 if (meta->head_buffer < buffers_start ||
1761 meta->head_buffer >= buffers_end) {
1762 pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
1763 return false;
1764 }
1765
1766 if (meta->commit_buffer < buffers_start ||
1767 meta->commit_buffer >= buffers_end) {
1768 pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
1769 return false;
1770 }
1771
1772 subbuf = rb_subbufs_from_meta(meta);
1773
1774 bitmap_clear(subbuf_mask, 0, meta->nr_subbufs);
1775
1776 /* Do the meta buffers and the subbufs themselves have correct data? */
1777 for (i = 0; i < meta->nr_subbufs; i++) {
1778 if (meta->buffers[i] < 0 ||
1779 meta->buffers[i] >= meta->nr_subbufs) {
1780 pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
1781 return false;
1782 }
1783
1784 if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
1785 pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
1786 return false;
1787 }
1788
1789 if (test_bit(meta->buffers[i], subbuf_mask)) {
1790 pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
1791 return false;
1792 }
1793
1794 set_bit(meta->buffers[i], subbuf_mask);
1795 subbuf = (void *)subbuf + subbuf_size;
1796 }
1797
1798 return true;
1799 }
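/*
 * For example, a stale meta with nr_subbufs = 3 and buffers[] = { 1, 1, 2 }
 * (illustrative values) is rejected above: the second entry finds bit 1
 * already set in subbuf_mask, so the array is reported as having duplicates.
 */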
1800
1801 static int rb_meta_subbuf_idx(struct ring_buffer_cpu_meta *meta, void *subbuf);
1802
1803 static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
1804 unsigned long long *timestamp, u64 *delta_ptr)
1805 {
1806 struct ring_buffer_event *event;
1807 u64 ts, delta;
1808 int events = 0;
1809 int e;
1810
1811 *delta_ptr = 0;
1812 *timestamp = 0;
1813
1814 ts = dpage->time_stamp;
1815
1816 for (e = 0; e < tail; e += rb_event_length(event)) {
1817
1818 event = (struct ring_buffer_event *)(dpage->data + e);
1819
1820 switch (event->type_len) {
1821
1822 case RINGBUF_TYPE_TIME_EXTEND:
1823 delta = rb_event_time_stamp(event);
1824 ts += delta;
1825 break;
1826
1827 case RINGBUF_TYPE_TIME_STAMP:
1828 delta = rb_event_time_stamp(event);
1829 delta = rb_fix_abs_ts(delta, ts);
1830 if (delta < ts) {
1831 *delta_ptr = delta;
1832 *timestamp = ts;
1833 return -1;
1834 }
1835 ts = delta;
1836 break;
1837
1838 case RINGBUF_TYPE_PADDING:
1839 if (event->time_delta == 1)
1840 break;
1841 fallthrough;
1842 case RINGBUF_TYPE_DATA:
1843 events++;
1844 ts += event->time_delta;
1845 break;
1846
1847 default:
1848 return -1;
1849 }
1850 }
1851 *timestamp = ts;
1852 return events;
1853 }
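/*
 * For example (illustrative deltas): walking a page with
 * dpage->time_stamp = 1000 holding DATA(delta=3), TIME_EXTEND(delta=50),
 * DATA(delta=2) ends with ts = 1000 + 3 + 50 + 2 = 1055 and events = 2;
 * the TIME_EXTEND only stretches the clock and is not counted as an event.
 */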
1854
1855 static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
1856 {
1857 unsigned long long ts;
1858 u64 delta;
1859 int tail;
1860
1861 tail = local_read(&dpage->commit);
1862 return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
1863 }
1864
1865 /* If the meta data has been validated, now validate the events */
1866 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
1867 {
1868 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
1869 struct buffer_page *head_page;
1870 unsigned long entry_bytes = 0;
1871 unsigned long entries = 0;
1872 int ret;
1873 int i;
1874
1875 if (!meta || !meta->head_buffer)
1876 return;
1877
1878 /* Do the reader page first */
1879 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
1880 if (ret < 0) {
1881 pr_info("Ring buffer reader page is invalid\n");
1882 goto invalid;
1883 }
1884 entries += ret;
1885 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
1886 local_set(&cpu_buffer->reader_page->entries, ret);
1887
1888 head_page = cpu_buffer->head_page;
1889
1890 /* If both the head and commit are on the reader_page then we are done. */
1891 if (head_page == cpu_buffer->reader_page &&
1892 head_page == cpu_buffer->commit_page)
1893 goto done;
1894
1895 /* Iterate until finding the commit page */
1896 for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
1897
1898 /* Reader page has already been done */
1899 if (head_page == cpu_buffer->reader_page)
1900 continue;
1901
1902 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
1903 if (ret < 0) {
1904 pr_info("Ring buffer meta [%d] invalid buffer page\n",
1905 cpu_buffer->cpu);
1906 goto invalid;
1907 }
1908
1909 /* If the buffer has content, update pages_touched */
1910 if (ret)
1911 local_inc(&cpu_buffer->pages_touched);
1912
1913 entries += ret;
1914 entry_bytes += local_read(&head_page->page->commit);
1915 local_set(&cpu_buffer->head_page->entries, ret);
1916
1917 if (head_page == cpu_buffer->commit_page)
1918 break;
1919 }
1920
1921 if (head_page != cpu_buffer->commit_page) {
1922 pr_info("Ring buffer meta [%d] commit page not found\n",
1923 cpu_buffer->cpu);
1924 goto invalid;
1925 }
1926 done:
1927 local_set(&cpu_buffer->entries, entries);
1928 local_set(&cpu_buffer->entries_bytes, entry_bytes);
1929
1930 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
1931 return;
1932
1933 invalid:
1934 /* The content of the buffers are invalid, reset the meta data */
1935 meta->head_buffer = 0;
1936 meta->commit_buffer = 0;
1937
1938 /* Reset the reader page */
1939 local_set(&cpu_buffer->reader_page->entries, 0);
1940 local_set(&cpu_buffer->reader_page->page->commit, 0);
1941
1942 /* Reset all the subbuffers */
1943 for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
1944 local_set(&head_page->entries, 0);
1945 local_set(&head_page->page->commit, 0);
1946 }
1947 }
1948
1949 static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
1950 {
1951 struct ring_buffer_cpu_meta *meta;
1952 unsigned long *subbuf_mask;
1953 unsigned long delta;
1954 void *subbuf;
1955 bool valid = false;
1956 int cpu;
1957 int i;
1958
1959 /* Create a mask to test the subbuf array */
1960 subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
1961 /* If subbuf_mask fails to allocate, then rb_cpu_meta_valid() will return false */
1962
1963 if (rb_meta_init(buffer, scratch_size))
1964 valid = true;
1965
1966 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1967 void *next_meta;
1968
1969 meta = rb_range_meta(buffer, nr_pages, cpu);
1970
1971 if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
1972 /* Make the mappings match the current address */
1973 subbuf = rb_subbufs_from_meta(meta);
1974 delta = (unsigned long)subbuf - meta->first_buffer;
1975 meta->first_buffer += delta;
1976 meta->head_buffer += delta;
1977 meta->commit_buffer += delta;
1978 continue;
1979 }
1980
1981 if (cpu < nr_cpu_ids - 1)
1982 next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
1983 else
1984 next_meta = (void *)buffer->range_addr_end;
1985
1986 memset(meta, 0, next_meta - (void *)meta);
1987
1988 meta->nr_subbufs = nr_pages + 1;
1989 meta->subbuf_size = PAGE_SIZE;
1990
1991 subbuf = rb_subbufs_from_meta(meta);
1992
1993 meta->first_buffer = (unsigned long)subbuf;
1994
1995 /*
1996 * The buffers[] array holds the order of the sub-buffers
1997 * that are after the meta data. The sub-buffers may
1998 * be swapped out when read and inserted into a different
1999 * location of the ring buffer. Although their addresses
2000 * remain the same, the buffers[] array contains the
2001 * index into the sub-buffers holding their actual order.
2002 */
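/*
 * For example (illustrative state), with nr_subbufs = 4 a previous boot
 * could leave buffers[] = { 2, 0, 1, 3 }: logical sub-buffer 0 currently
 * lives in physical slot 2, logical 1 in slot 0, and so on.
 */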
2003 for (i = 0; i < meta->nr_subbufs; i++) {
2004 meta->buffers[i] = i;
2005 rb_init_page(subbuf);
2006 subbuf += meta->subbuf_size;
2007 }
2008 }
2009 bitmap_free(subbuf_mask);
2010 }
2011
2012 static void *rbm_start(struct seq_file *m, loff_t *pos)
2013 {
2014 struct ring_buffer_per_cpu *cpu_buffer = m->private;
2015 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2016 unsigned long val;
2017
2018 if (!meta)
2019 return NULL;
2020
2021 if (*pos > meta->nr_subbufs)
2022 return NULL;
2023
2024 val = *pos;
2025 val++;
2026
2027 return (void *)val;
2028 }
2029
2030 static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
2031 {
2032 (*pos)++;
2033
2034 return rbm_start(m, pos);
2035 }
2036
2037 static int rbm_show(struct seq_file *m, void *v)
2038 {
2039 struct ring_buffer_per_cpu *cpu_buffer = m->private;
2040 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2041 unsigned long val = (unsigned long)v;
2042
2043 if (val == 1) {
2044 seq_printf(m, "head_buffer: %d\n",
2045 rb_meta_subbuf_idx(meta, (void *)meta->head_buffer));
2046 seq_printf(m, "commit_buffer: %d\n",
2047 rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer));
2048 seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size);
2049 seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs);
2050 return 0;
2051 }
2052
2053 val -= 2;
2054 seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]);
2055
2056 return 0;
2057 }
2058
2059 static void rbm_stop(struct seq_file *m, void *p)
2060 {
2061 }
2062
2063 static const struct seq_operations rb_meta_seq_ops = {
2064 .start = rbm_start,
2065 .next = rbm_next,
2066 .show = rbm_show,
2067 .stop = rbm_stop,
2068 };
2069
2070 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
2071 {
2072 struct seq_file *m;
2073 int ret;
2074
2075 ret = seq_open(file, &rb_meta_seq_ops);
2076 if (ret)
2077 return ret;
2078
2079 m = file->private_data;
2080 m->private = buffer->buffers[cpu];
2081
2082 return 0;
2083 }
2084
2085 /* Map the buffer_pages to the previous head and commit pages */
2086 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
2087 struct buffer_page *bpage)
2088 {
2089 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2090
2091 if (meta->head_buffer == (unsigned long)bpage->page)
2092 cpu_buffer->head_page = bpage;
2093
2094 if (meta->commit_buffer == (unsigned long)bpage->page) {
2095 cpu_buffer->commit_page = bpage;
2096 cpu_buffer->tail_page = bpage;
2097 }
2098 }
2099
2100 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
2101 long nr_pages, struct list_head *pages)
2102 {
2103 struct trace_buffer *buffer = cpu_buffer->buffer;
2104 struct ring_buffer_cpu_meta *meta = NULL;
2105 struct buffer_page *bpage, *tmp;
2106 bool user_thread = current->mm != NULL;
2107 gfp_t mflags;
2108 long i;
2109
2110 /*
2111 * Check if the available memory is there first.
2112 * Note, si_mem_available() only gives us a rough estimate of available
2113 * memory. It may not be accurate. But we don't care, we just want
2114 * to prevent doing any allocation when it is obvious that it is
2115 * not going to succeed.
2116 */
2117 i = si_mem_available();
2118 if (i < nr_pages)
2119 return -ENOMEM;
2120
2121 /*
2122 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
2123 * gracefully without invoking oom-killer and the system is not
2124 * destabilized.
2125 */
2126 mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
2127
2128 /*
2129 * If a user thread allocates too much, si_mem_available() may
2130 * report there's enough memory even though there is not. In that
2131 * case make sure the OOM killer kills this thread. This can happen
2132 * even with RETRY_MAYFAIL because another task may be doing
2133 * an allocation after this task has taken all memory.
2134 * This is the task the OOM killer needs to take out during this
2135 * loop, even if it was triggered by an allocation somewhere else.
2136 */
2137 if (user_thread)
2138 set_current_oom_origin();
2139
2140 if (buffer->range_addr_start)
2141 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
2142
2143 for (i = 0; i < nr_pages; i++) {
2144 struct page *page;
2145
2146 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2147 mflags, cpu_to_node(cpu_buffer->cpu));
2148 if (!bpage)
2149 goto free_pages;
2150
2151 rb_check_bpage(cpu_buffer, bpage);
2152
2153 /*
2154 * Append the pages, as for mapped buffers we want to keep
2155 * the order.
2156 */
2157 list_add_tail(&bpage->list, pages);
2158
2159 if (meta) {
2160 /* A range was given. Use that for the buffer page */
2161 bpage->page = rb_range_buffer(cpu_buffer, i + 1);
2162 if (!bpage->page)
2163 goto free_pages;
2164 /* If this is valid from a previous boot */
2165 if (meta->head_buffer)
2166 rb_meta_buffer_update(cpu_buffer, bpage);
2167 bpage->range = 1;
2168 bpage->id = i + 1;
2169 } else {
2170 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
2171 mflags | __GFP_COMP | __GFP_ZERO,
2172 cpu_buffer->buffer->subbuf_order);
2173 if (!page)
2174 goto free_pages;
2175 bpage->page = page_address(page);
2176 rb_init_page(bpage->page);
2177 }
2178 bpage->order = cpu_buffer->buffer->subbuf_order;
2179
2180 if (user_thread && fatal_signal_pending(current))
2181 goto free_pages;
2182 }
2183 if (user_thread)
2184 clear_current_oom_origin();
2185
2186 return 0;
2187
2188 free_pages:
2189 list_for_each_entry_safe(bpage, tmp, pages, list) {
2190 list_del_init(&bpage->list);
2191 free_buffer_page(bpage);
2192 }
2193 if (user_thread)
2194 clear_current_oom_origin();
2195
2196 return -ENOMEM;
2197 }
2198
2199 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
2200 unsigned long nr_pages)
2201 {
2202 LIST_HEAD(pages);
2203
2204 WARN_ON(!nr_pages);
2205
2206 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
2207 return -ENOMEM;
2208
2209 /*
2210 * The ring buffer page list is a circular list that does not
2211 * start and end with a list head. All page list items point to
2212 * other pages.
2213 */
2214 cpu_buffer->pages = pages.next;
2215 list_del(&pages);
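/*
 * For example, with three allocated pages p0, p1 and p2, removing the
 * temporary list head leaves the pure circle p0 -> p1 -> p2 -> p0,
 * with cpu_buffer->pages pointing at p0.
 */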
2216
2217 cpu_buffer->nr_pages = nr_pages;
2218
2219 rb_check_pages(cpu_buffer);
2220
2221 return 0;
2222 }
2223
2224 static struct ring_buffer_per_cpu *
2225 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
2226 {
2227 struct ring_buffer_per_cpu *cpu_buffer;
2228 struct ring_buffer_cpu_meta *meta;
2229 struct buffer_page *bpage;
2230 struct page *page;
2231 int ret;
2232
2233 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
2234 GFP_KERNEL, cpu_to_node(cpu));
2235 if (!cpu_buffer)
2236 return NULL;
2237
2238 cpu_buffer->cpu = cpu;
2239 cpu_buffer->buffer = buffer;
2240 raw_spin_lock_init(&cpu_buffer->reader_lock);
2241 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
2242 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
2243 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
2244 init_completion(&cpu_buffer->update_done);
2245 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
2246 init_waitqueue_head(&cpu_buffer->irq_work.waiters);
2247 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
2248 mutex_init(&cpu_buffer->mapping_lock);
2249
2250 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2251 GFP_KERNEL, cpu_to_node(cpu));
2252 if (!bpage)
2253 goto fail_free_buffer;
2254
2255 rb_check_bpage(cpu_buffer, bpage);
2256
2257 cpu_buffer->reader_page = bpage;
2258
2259 if (buffer->range_addr_start) {
2260 /*
2261 * Range mapped buffers have the same restrictions as memory
2262 * mapped ones do.
2263 */
2264 cpu_buffer->mapped = 1;
2265 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
2266 bpage->page = rb_range_buffer(cpu_buffer, 0);
2267 if (!bpage->page)
2268 goto fail_free_reader;
2269 if (cpu_buffer->ring_meta->head_buffer)
2270 rb_meta_buffer_update(cpu_buffer, bpage);
2271 bpage->range = 1;
2272 } else {
2273 page = alloc_pages_node(cpu_to_node(cpu),
2274 GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
2275 cpu_buffer->buffer->subbuf_order);
2276 if (!page)
2277 goto fail_free_reader;
2278 bpage->page = page_address(page);
2279 rb_init_page(bpage->page);
2280 }
2281
2282 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2283 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2284
2285 ret = rb_allocate_pages(cpu_buffer, nr_pages);
2286 if (ret < 0)
2287 goto fail_free_reader;
2288
2289 rb_meta_validate_events(cpu_buffer);
2290
2291 /* If the boot meta was valid then this has already been updated */
2292 meta = cpu_buffer->ring_meta;
2293 if (!meta || !meta->head_buffer ||
2294 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) {
2295 if (meta && meta->head_buffer &&
2296 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) {
2297 pr_warn("Ring buffer meta buffers not all mapped\n");
2298 if (!cpu_buffer->head_page)
2299 pr_warn(" Missing head_page\n");
2300 if (!cpu_buffer->commit_page)
2301 pr_warn(" Missing commit_page\n");
2302 if (!cpu_buffer->tail_page)
2303 pr_warn(" Missing tail_page\n");
2304 }
2305
2306 cpu_buffer->head_page
2307 = list_entry(cpu_buffer->pages, struct buffer_page, list);
2308 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
2309
2310 rb_head_page_activate(cpu_buffer);
2311
2312 if (cpu_buffer->ring_meta)
2313 meta->commit_buffer = meta->head_buffer;
2314 } else {
2315 /* The valid meta buffer still needs to activate the head page */
2316 rb_head_page_activate(cpu_buffer);
2317 }
2318
2319 return cpu_buffer;
2320
2321 fail_free_reader:
2322 free_buffer_page(cpu_buffer->reader_page);
2323
2324 fail_free_buffer:
2325 kfree(cpu_buffer);
2326 return NULL;
2327 }
2328
2329 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
2330 {
2331 struct list_head *head = cpu_buffer->pages;
2332 struct buffer_page *bpage, *tmp;
2333
2334 irq_work_sync(&cpu_buffer->irq_work.work);
2335
2336 free_buffer_page(cpu_buffer->reader_page);
2337
2338 if (head) {
2339 rb_head_page_deactivate(cpu_buffer);
2340
2341 list_for_each_entry_safe(bpage, tmp, head, list) {
2342 list_del_init(&bpage->list);
2343 free_buffer_page(bpage);
2344 }
2345 bpage = list_entry(head, struct buffer_page, list);
2346 free_buffer_page(bpage);
2347 }
2348
2349 free_page((unsigned long)cpu_buffer->free_page);
2350
2351 kfree(cpu_buffer);
2352 }
2353
2354 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
2355 int order, unsigned long start,
2356 unsigned long end,
2357 unsigned long scratch_size,
2358 struct lock_class_key *key)
2359 {
2360 struct trace_buffer *buffer;
2361 long nr_pages;
2362 int subbuf_size;
2363 int bsize;
2364 int cpu;
2365 int ret;
2366
2367 /* keep it in its own cache line */
2368 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
2369 GFP_KERNEL);
2370 if (!buffer)
2371 return NULL;
2372
2373 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
2374 goto fail_free_buffer;
2375
2376 buffer->subbuf_order = order;
2377 subbuf_size = (PAGE_SIZE << order);
2378 buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE;
2379
2380 /* Max payload is buffer page size - header (8 bytes) */
2381 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
2382
2383 buffer->flags = flags;
2384 buffer->clock = trace_clock_local;
2385 buffer->reader_lock_key = key;
2386
2387 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
2388 init_waitqueue_head(&buffer->irq_work.waiters);
2389
2390 buffer->cpus = nr_cpu_ids;
2391
2392 bsize = sizeof(void *) * nr_cpu_ids;
2393 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
2394 GFP_KERNEL);
2395 if (!buffer->buffers)
2396 goto fail_free_cpumask;
2397
2398 /* If start/end are specified, then that overrides size */
2399 if (start && end) {
2400 unsigned long buffers_start;
2401 unsigned long ptr;
2402 int n;
2403
2404 /* Make sure that start is word aligned */
2405 start = ALIGN(start, sizeof(long));
2406
2407 /* scratch_size needs to be aligned too */
2408 scratch_size = ALIGN(scratch_size, sizeof(long));
2409
2410 /* Account for the buffer meta data, word aligned */
2411 buffers_start = start + sizeof(struct ring_buffer_cpu_meta);
2412 buffers_start = ALIGN(buffers_start, sizeof(long));
2413 buffers_start += scratch_size;
2414
2415 /* Calculate the size for the per CPU data */
2416 size = end - buffers_start;
2417 size = size / nr_cpu_ids;
2418
2419 /*
2420 * The number of sub-buffers (nr_pages) is determined by the
2421 * total size allocated minus the meta data size.
2422 * Then that is divided by the number of per CPU buffers
2423 * needed, plus account for the integer array index that
2424 * will be appended to the meta data.
2425 */
2426 nr_pages = (size - sizeof(struct ring_buffer_cpu_meta)) /
2427 (subbuf_size + sizeof(int));
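/*
 * For example (illustrative numbers): with size = 1 MiB per CPU,
 * subbuf_size = 4096 and a 32 byte cpu meta on a 64-bit kernel,
 * nr_pages = (1048576 - 32) / (4096 + 4) = 255 sub-buffers to try first.
 */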
2428 /* Need at least two pages plus the reader page */
2429 if (nr_pages < 3)
2430 goto fail_free_buffers;
2431
2432 again:
2433 /* Make sure that the layout still fits once everything is aligned */
2434 for (n = 0, ptr = buffers_start; n < nr_cpu_ids; n++) {
2435 ptr += sizeof(struct ring_buffer_cpu_meta) +
2436 sizeof(int) * nr_pages;
2437 ptr = ALIGN(ptr, subbuf_size);
2438 ptr += subbuf_size * nr_pages;
2439 }
2440 if (ptr > end) {
2441 if (nr_pages <= 3)
2442 goto fail_free_buffers;
2443 nr_pages--;
2444 goto again;
2445 }
2446
2447 /* nr_pages should not count the reader page */
2448 nr_pages--;
2449 buffer->range_addr_start = start;
2450 buffer->range_addr_end = end;
2451
2452 rb_range_meta_init(buffer, nr_pages, scratch_size);
2453 } else {
2454
2455 /* need at least two pages */
2456 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2457 if (nr_pages < 2)
2458 nr_pages = 2;
2459 }
2460
2461 cpu = raw_smp_processor_id();
2462 cpumask_set_cpu(cpu, buffer->cpumask);
2463 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
2464 if (!buffer->buffers[cpu])
2465 goto fail_free_buffers;
2466
2467 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2468 if (ret < 0)
2469 goto fail_free_buffers;
2470
2471 mutex_init(&buffer->mutex);
2472
2473 return buffer;
2474
2475 fail_free_buffers:
2476 for_each_buffer_cpu(buffer, cpu) {
2477 if (buffer->buffers[cpu])
2478 rb_free_cpu_buffer(buffer->buffers[cpu]);
2479 }
2480 kfree(buffer->buffers);
2481
2482 fail_free_cpumask:
2483 free_cpumask_var(buffer->cpumask);
2484
2485 fail_free_buffer:
2486 kfree(buffer);
2487 return NULL;
2488 }
2489
2490 /**
2491 * __ring_buffer_alloc - allocate a new ring_buffer
2492 * @size: the size in bytes per cpu that is needed.
2493 * @flags: attributes to set for the ring buffer.
2494 * @key: ring buffer reader_lock_key.
2495 *
2496 * Currently the only flag that is available is the RB_FL_OVERWRITE
2497 * flag. This flag means that the buffer will overwrite old data
2498 * when the buffer wraps. If this flag is not set, the buffer will
2499 * drop data when the tail hits the head.
2500 */
2501 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
2502 struct lock_class_key *key)
2503 {
2504 /* Default buffer page size - one system page */
2505 return alloc_buffer(size, flags, 0, 0, 0, 0, key);
2506
2507 }
2508 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
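/*
 * Callers normally go through the ring_buffer_alloc() wrapper in
 * <linux/ring_buffer.h>, which supplies a static lock class key, e.g.:
 *
 *	buf = ring_buffer_alloc(SZ_64K, RB_FL_OVERWRITE);
 */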
2509
2510 /**
2511 * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
2512 * @size: the size in bytes per cpu that is needed.
2513 * @flags: attributes to set for the ring buffer.
2514 * @order: sub-buffer order
2515 * @start: start of allocated range
2516 * @range_size: size of allocated range
2517 * @scratch_size: size of scratch area (for preallocated memory buffers)
2518 * @key: ring buffer reader_lock_key.
2519 *
2520 * Currently the only flag that is available is the RB_FL_OVERWRITE
2521 * flag. This flag means that the buffer will overwrite old data
2522 * when the buffer wraps. If this flag is not set, the buffer will
2523 * drop data when the tail hits the head.
2524 */
2525 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
2526 int order, unsigned long start,
2527 unsigned long range_size,
2528 unsigned long scratch_size,
2529 struct lock_class_key *key)
2530 {
2531 return alloc_buffer(size, flags, order, start, start + range_size,
2532 scratch_size, key);
2533 }
2534
2535 void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
2536 {
2537 struct ring_buffer_meta *meta;
2538 void *ptr;
2539
2540 if (!buffer || !buffer->meta)
2541 return NULL;
2542
2543 meta = buffer->meta;
2544
2545 ptr = (void *)ALIGN((unsigned long)meta + sizeof(*meta), sizeof(long));
2546
2547 if (size)
2548 *size = (void *)meta + meta->buffers_offset - ptr;
2549
2550 return ptr;
2551 }
2552
2553 /**
2554 * ring_buffer_free - free a ring buffer.
2555 * @buffer: the buffer to free.
2556 */
2557 void
2558 ring_buffer_free(struct trace_buffer *buffer)
2559 {
2560 int cpu;
2561
2562 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2563
2564 irq_work_sync(&buffer->irq_work.work);
2565
2566 for_each_buffer_cpu(buffer, cpu)
2567 rb_free_cpu_buffer(buffer->buffers[cpu]);
2568
2569 kfree(buffer->buffers);
2570 free_cpumask_var(buffer->cpumask);
2571
2572 kfree(buffer);
2573 }
2574 EXPORT_SYMBOL_GPL(ring_buffer_free);
2575
2576 void ring_buffer_set_clock(struct trace_buffer *buffer,
2577 u64 (*clock)(void))
2578 {
2579 buffer->clock = clock;
2580 }
2581
2582 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
2583 {
2584 buffer->time_stamp_abs = abs;
2585 }
2586
2587 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
2588 {
2589 return buffer->time_stamp_abs;
2590 }
2591
2592 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
2593 {
2594 return local_read(&bpage->entries) & RB_WRITE_MASK;
2595 }
2596
2597 static inline unsigned long rb_page_write(struct buffer_page *bpage)
2598 {
2599 return local_read(&bpage->write) & RB_WRITE_MASK;
2600 }
2601
2602 static bool
2603 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
2604 {
2605 struct list_head *tail_page, *to_remove, *next_page;
2606 struct buffer_page *to_remove_page, *tmp_iter_page;
2607 struct buffer_page *last_page, *first_page;
2608 unsigned long nr_removed;
2609 unsigned long head_bit;
2610 int page_entries;
2611
2612 head_bit = 0;
2613
2614 raw_spin_lock_irq(&cpu_buffer->reader_lock);
2615 atomic_inc(&cpu_buffer->record_disabled);
2616 /*
2617 * We don't race with the readers since we have acquired the reader
2618 * lock. We also don't race with writers after disabling recording.
2619 * This makes it easy to figure out the first and the last page to be
2620 * removed from the list. We unlink all the pages in between including
2621 * the first and last pages. This is done in a busy loop so that we
2622 * lose the least number of traces.
2623 * The pages are freed after we restart recording and unlock readers.
2624 */
2625 tail_page = &cpu_buffer->tail_page->list;
2626
2627 /*
2628 * The tail page might be on the reader page; in that case we
2629 * remove the next page from the ring buffer.
2630 */
2631 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2632 tail_page = rb_list_head(tail_page->next);
2633 to_remove = tail_page;
2634
2635 /* start of pages to remove */
2636 first_page = list_entry(rb_list_head(to_remove->next),
2637 struct buffer_page, list);
2638
2639 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
2640 to_remove = rb_list_head(to_remove)->next;
2641 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
2642 }
2643 /* Read iterators need to reset themselves when some pages removed */
2644 cpu_buffer->pages_removed += nr_removed;
2645
2646 next_page = rb_list_head(to_remove)->next;
2647
2648 /*
2649 * Now we remove all pages between tail_page and next_page.
2650 * Make sure that we have head_bit value preserved for the
2651 * next page
2652 */
2653 tail_page->next = (struct list_head *)((unsigned long)next_page |
2654 head_bit);
2655 next_page = rb_list_head(next_page);
2656 next_page->prev = tail_page;
2657
2658 /* make sure pages points to a valid page in the ring buffer */
2659 cpu_buffer->pages = next_page;
2660 cpu_buffer->cnt++;
2661
2662 /* update head page */
2663 if (head_bit)
2664 cpu_buffer->head_page = list_entry(next_page,
2665 struct buffer_page, list);
2666
2667 /* pages are removed, resume tracing and then free the pages */
2668 atomic_dec(&cpu_buffer->record_disabled);
2669 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
2670
2671 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
2672
2673 /* last buffer page to remove */
2674 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
2675 list);
2676 tmp_iter_page = first_page;
2677
2678 do {
2679 cond_resched();
2680
2681 to_remove_page = tmp_iter_page;
2682 rb_inc_page(&tmp_iter_page);
2683
2684 /* update the counters */
2685 page_entries = rb_page_entries(to_remove_page);
2686 if (page_entries) {
2687 /*
2688 * If something was added to this page, it was full
2689 * since it is not the tail page. So we deduct the
2690 * bytes consumed in ring buffer from here.
2691 * Increment overrun to account for the lost events.
2692 */
2693 local_add(page_entries, &cpu_buffer->overrun);
2694 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
2695 local_inc(&cpu_buffer->pages_lost);
2696 }
2697
2698 /*
2699 * We have already removed references to this list item, just
2700 * free up the buffer_page and its page
2701 */
2702 free_buffer_page(to_remove_page);
2703 nr_removed--;
2704
2705 } while (to_remove_page != last_page);
2706
2707 RB_WARN_ON(cpu_buffer, nr_removed);
2708
2709 return nr_removed == 0;
2710 }
2711
2712 static bool
2713 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
2714 {
2715 struct list_head *pages = &cpu_buffer->new_pages;
2716 unsigned long flags;
2717 bool success;
2718 int retries;
2719
2720 /* Can be called at early boot up, where interrupts must not be enabled */
2721 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2722 /*
2723 * We are holding the reader lock, so the reader page won't be swapped
2724 * in the ring buffer. Now we are racing with the writer trying to
2725 * move head page and the tail page.
2726 * We are going to adapt the reader page update process where:
2727 * 1. We first splice the start and end of list of new pages between
2728 * the head page and its previous page.
2729 * 2. We cmpxchg the prev_page->next to point from head page to the
2730 * start of new pages list.
2731 * 3. Finally, we update the head->prev to the end of new list.
2732 *
2733 * We will try this process 10 times, to make sure that we don't keep
2734 * spinning.
2735 */
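/*
 * Roughly, with H marking the RB_PAGE_HEAD flag bit carried in the link:
 *
 *   before:  prev --H--> head --> ...
 *   after:   prev ------> first_new -> ... -> last_new --H--> head --> ...
 *
 * The cmpxchg in step 2 only succeeds while the head (and its flag bit)
 * has not moved underneath us; otherwise we retry.
 */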
2736 retries = 10;
2737 success = false;
2738 while (retries--) {
2739 struct list_head *head_page, *prev_page;
2740 struct list_head *last_page, *first_page;
2741 struct list_head *head_page_with_bit;
2742 struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
2743
2744 if (!hpage)
2745 break;
2746 head_page = &hpage->list;
2747 prev_page = head_page->prev;
2748
2749 first_page = pages->next;
2750 last_page = pages->prev;
2751
2752 head_page_with_bit = (struct list_head *)
2753 ((unsigned long)head_page | RB_PAGE_HEAD);
2754
2755 last_page->next = head_page_with_bit;
2756 first_page->prev = prev_page;
2757
2758 /* caution: head_page_with_bit gets updated on cmpxchg failure */
2759 if (try_cmpxchg(&prev_page->next,
2760 &head_page_with_bit, first_page)) {
2761 /*
2762 * yay, we replaced the page pointer to our new list,
2763 * now, we just have to update the head page's prev
2764 * pointer to point to the end of the list
2765 */
2766 head_page->prev = last_page;
2767 cpu_buffer->cnt++;
2768 success = true;
2769 break;
2770 }
2771 }
2772
2773 if (success)
2774 INIT_LIST_HEAD(pages);
2775 /*
2776 * If we weren't successful in adding the new pages, warn and stop
2777 * tracing
2778 */
2779 RB_WARN_ON(cpu_buffer, !success);
2780 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2781
2782 /* free pages if they weren't inserted */
2783 if (!success) {
2784 struct buffer_page *bpage, *tmp;
2785 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2786 list) {
2787 list_del_init(&bpage->list);
2788 free_buffer_page(bpage);
2789 }
2790 }
2791 return success;
2792 }
2793
2794 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2795 {
2796 bool success;
2797
2798 if (cpu_buffer->nr_pages_to_update > 0)
2799 success = rb_insert_pages(cpu_buffer);
2800 else
2801 success = rb_remove_pages(cpu_buffer,
2802 -cpu_buffer->nr_pages_to_update);
2803
2804 if (success)
2805 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2806 }
2807
2808 static void update_pages_handler(struct work_struct *work)
2809 {
2810 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2811 struct ring_buffer_per_cpu, update_pages_work);
2812 rb_update_pages(cpu_buffer);
2813 complete(&cpu_buffer->update_done);
2814 }
2815
2816 /**
2817 * ring_buffer_resize - resize the ring buffer
2818 * @buffer: the buffer to resize.
2819 * @size: the new size.
2820 * @cpu_id: the cpu buffer to resize
2821 *
2822 * Minimum size is 2 * buffer->subbuf_size.
2823 *
2824 * Returns 0 on success and < 0 on failure.
2825 */
2826 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2827 int cpu_id)
2828 {
2829 struct ring_buffer_per_cpu *cpu_buffer;
2830 unsigned long nr_pages;
2831 int cpu, err;
2832
2833 /*
2834 * Always succeed at resizing a non-existent buffer:
2835 */
2836 if (!buffer)
2837 return 0;
2838
2839 /* Make sure the requested buffer exists */
2840 if (cpu_id != RING_BUFFER_ALL_CPUS &&
2841 !cpumask_test_cpu(cpu_id, buffer->cpumask))
2842 return 0;
2843
2844 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2845
2846 /* we need a minimum of two pages */
2847 if (nr_pages < 2)
2848 nr_pages = 2;
2849
2850 /* prevent another thread from changing buffer sizes */
2851 mutex_lock(&buffer->mutex);
2852 atomic_inc(&buffer->resizing);
2853
2854 if (cpu_id == RING_BUFFER_ALL_CPUS) {
2855 /*
2856 * Don't succeed if resizing is disabled, as a reader might be
2857 * manipulating the ring buffer and is expecting a sane state while
2858 * this is true.
2859 */
2860 for_each_buffer_cpu(buffer, cpu) {
2861 cpu_buffer = buffer->buffers[cpu];
2862 if (atomic_read(&cpu_buffer->resize_disabled)) {
2863 err = -EBUSY;
2864 goto out_err_unlock;
2865 }
2866 }
2867
2868 /* calculate the pages to update */
2869 for_each_buffer_cpu(buffer, cpu) {
2870 cpu_buffer = buffer->buffers[cpu];
2871
2872 cpu_buffer->nr_pages_to_update = nr_pages -
2873 cpu_buffer->nr_pages;
2874 /*
2875 * Nothing more to do when removing pages or when there is no update.
2876 */
2877 if (cpu_buffer->nr_pages_to_update <= 0)
2878 continue;
2879 /*
2880 * to add pages, make sure all new pages can be
2881 * allocated without receiving ENOMEM
2882 */
2883 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2884 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2885 &cpu_buffer->new_pages)) {
2886 /* not enough memory for new pages */
2887 err = -ENOMEM;
2888 goto out_err;
2889 }
2890
2891 cond_resched();
2892 }
2893
2894 cpus_read_lock();
2895 /*
2896 * Fire off all the required work handlers
2897 * We can't schedule on offline CPUs, but it's not necessary
2898 * since we can change their buffer sizes without any race.
2899 */
2900 for_each_buffer_cpu(buffer, cpu) {
2901 cpu_buffer = buffer->buffers[cpu];
2902 if (!cpu_buffer->nr_pages_to_update)
2903 continue;
2904
2905 /* Can't run something on an offline CPU. */
2906 if (!cpu_online(cpu)) {
2907 rb_update_pages(cpu_buffer);
2908 cpu_buffer->nr_pages_to_update = 0;
2909 } else {
2910 /* Run directly if possible. */
2911 migrate_disable();
2912 if (cpu != smp_processor_id()) {
2913 migrate_enable();
2914 schedule_work_on(cpu,
2915 &cpu_buffer->update_pages_work);
2916 } else {
2917 update_pages_handler(&cpu_buffer->update_pages_work);
2918 migrate_enable();
2919 }
2920 }
2921 }
2922
2923 /* wait for all the updates to complete */
2924 for_each_buffer_cpu(buffer, cpu) {
2925 cpu_buffer = buffer->buffers[cpu];
2926 if (!cpu_buffer->nr_pages_to_update)
2927 continue;
2928
2929 if (cpu_online(cpu))
2930 wait_for_completion(&cpu_buffer->update_done);
2931 cpu_buffer->nr_pages_to_update = 0;
2932 }
2933
2934 cpus_read_unlock();
2935 } else {
2936 cpu_buffer = buffer->buffers[cpu_id];
2937
2938 if (nr_pages == cpu_buffer->nr_pages)
2939 goto out;
2940
2941 /*
2942 * Don't succeed if resizing is disabled, as a reader might be
2943 * manipulating the ring buffer and is expecting a sane state while
2944 * this is true.
2945 */
2946 if (atomic_read(&cpu_buffer->resize_disabled)) {
2947 err = -EBUSY;
2948 goto out_err_unlock;
2949 }
2950
2951 cpu_buffer->nr_pages_to_update = nr_pages -
2952 cpu_buffer->nr_pages;
2953
2954 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2955 if (cpu_buffer->nr_pages_to_update > 0 &&
2956 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2957 &cpu_buffer->new_pages)) {
2958 err = -ENOMEM;
2959 goto out_err;
2960 }
2961
2962 cpus_read_lock();
2963
2964 /* Can't run something on an offline CPU. */
2965 if (!cpu_online(cpu_id))
2966 rb_update_pages(cpu_buffer);
2967 else {
2968 /* Run directly if possible. */
2969 migrate_disable();
2970 if (cpu_id == smp_processor_id()) {
2971 rb_update_pages(cpu_buffer);
2972 migrate_enable();
2973 } else {
2974 migrate_enable();
2975 schedule_work_on(cpu_id,
2976 &cpu_buffer->update_pages_work);
2977 wait_for_completion(&cpu_buffer->update_done);
2978 }
2979 }
2980
2981 cpu_buffer->nr_pages_to_update = 0;
2982 cpus_read_unlock();
2983 }
2984
2985 out:
2986 /*
2987 * The ring buffer resize can happen with the ring buffer
2988 * enabled, so that the update disturbs the tracing as little
2989 * as possible. But if the buffer is disabled, we do not need
2990 * to worry about that, and we can take the time to verify
2991 * that the buffer is not corrupt.
2992 */
2993 if (atomic_read(&buffer->record_disabled)) {
2994 atomic_inc(&buffer->record_disabled);
2995 /*
2996 * Even though the buffer was disabled, we must make sure
2997 * that it is truly disabled before calling rb_check_pages.
2998 * There could have been a race between checking
2999 * record_disable and incrementing it.
3000 */
3001 synchronize_rcu();
3002 for_each_buffer_cpu(buffer, cpu) {
3003 cpu_buffer = buffer->buffers[cpu];
3004 rb_check_pages(cpu_buffer);
3005 }
3006 atomic_dec(&buffer->record_disabled);
3007 }
3008
3009 atomic_dec(&buffer->resizing);
3010 mutex_unlock(&buffer->mutex);
3011 return 0;
3012
3013 out_err:
3014 for_each_buffer_cpu(buffer, cpu) {
3015 struct buffer_page *bpage, *tmp;
3016
3017 cpu_buffer = buffer->buffers[cpu];
3018 cpu_buffer->nr_pages_to_update = 0;
3019
3020 if (list_empty(&cpu_buffer->new_pages))
3021 continue;
3022
3023 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
3024 list) {
3025 list_del_init(&bpage->list);
3026 free_buffer_page(bpage);
3027 }
3028 }
3029 out_err_unlock:
3030 atomic_dec(&buffer->resizing);
3031 mutex_unlock(&buffer->mutex);
3032 return err;
3033 }
3034 EXPORT_SYMBOL_GPL(ring_buffer_resize);
3035
3036 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
3037 {
3038 mutex_lock(&buffer->mutex);
3039 if (val)
3040 buffer->flags |= RB_FL_OVERWRITE;
3041 else
3042 buffer->flags &= ~RB_FL_OVERWRITE;
3043 mutex_unlock(&buffer->mutex);
3044 }
3045 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
3046
3047 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
3048 {
3049 return bpage->page->data + index;
3050 }
3051
3052 static __always_inline struct ring_buffer_event *
3053 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
3054 {
3055 return __rb_page_index(cpu_buffer->reader_page,
3056 cpu_buffer->reader_page->read);
3057 }
3058
3059 static struct ring_buffer_event *
3060 rb_iter_head_event(struct ring_buffer_iter *iter)
3061 {
3062 struct ring_buffer_event *event;
3063 struct buffer_page *iter_head_page = iter->head_page;
3064 unsigned long commit;
3065 unsigned length;
3066
3067 if (iter->head != iter->next_event)
3068 return iter->event;
3069
3070 /*
3071 * When the writer goes across pages, it issues a cmpxchg which
3072 * is a mb(), which will synchronize with the rmb here.
3073 * (see rb_tail_page_update() and __rb_reserve_next())
3074 */
3075 commit = rb_page_commit(iter_head_page);
3076 smp_rmb();
3077
3078 /* An event needs to be at least 8 bytes in size */
3079 if (iter->head > commit - 8)
3080 goto reset;
3081
3082 event = __rb_page_index(iter_head_page, iter->head);
3083 length = rb_event_length(event);
3084
3085 /*
3086 * READ_ONCE() doesn't work on functions and we don't want the
3087 * compiler doing any crazy optimizations with length.
3088 */
3089 barrier();
3090
3091 if ((iter->head + length) > commit || length > iter->event_size)
3092 /* Writer corrupted the read? */
3093 goto reset;
3094
3095 memcpy(iter->event, event, length);
3096 /*
3097 * If the page stamp is still the same after this rmb() then the
3098 * event was safely copied without the writer entering the page.
3099 */
3100 smp_rmb();
3101
3102 /* Make sure the page didn't change since we read this */
3103 if (iter->page_stamp != iter_head_page->page->time_stamp ||
3104 commit > rb_page_commit(iter_head_page))
3105 goto reset;
3106
3107 iter->next_event = iter->head + length;
3108 return iter->event;
3109 reset:
3110 /* Reset to the beginning */
3111 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3112 iter->head = 0;
3113 iter->next_event = 0;
3114 iter->missed_events = 1;
3115 return NULL;
3116 }
3117
3118 /* Size is determined by what has been committed */
3119 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
3120 {
3121 return rb_page_commit(bpage) & ~RB_MISSED_MASK;
3122 }
3123
3124 static __always_inline unsigned
3125 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
3126 {
3127 return rb_page_commit(cpu_buffer->commit_page);
3128 }
3129
3130 static __always_inline unsigned
3131 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
3132 {
3133 unsigned long addr = (unsigned long)event;
3134
3135 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
3136
3137 return addr - BUF_PAGE_HDR_SIZE;
3138 }
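/*
 * For example, with subbuf_order = 0 and PAGE_SIZE = 4096 (illustrative),
 * an event at byte 0x150 of its sub-buffer page yields
 * 0x150 - BUF_PAGE_HDR_SIZE, i.e. the offset into the data area.
 */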
3139
3140 static void rb_inc_iter(struct ring_buffer_iter *iter)
3141 {
3142 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3143
3144 /*
3145 * The iterator could be on the reader page (it starts there).
3146 * But the head could have moved, since the reader was
3147 * found. Check for this case and assign the iterator
3148 * to the head page instead of next.
3149 */
3150 if (iter->head_page == cpu_buffer->reader_page)
3151 iter->head_page = rb_set_head_page(cpu_buffer);
3152 else
3153 rb_inc_page(&iter->head_page);
3154
3155 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3156 iter->head = 0;
3157 iter->next_event = 0;
3158 }
3159
3160 /* Return the index into the sub-buffers for a given sub-buffer */
3161 static int rb_meta_subbuf_idx(struct ring_buffer_cpu_meta *meta, void *subbuf)
3162 {
3163 void *subbuf_array;
3164
3165 subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
3166 subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size);
3167 return (subbuf - subbuf_array) / meta->subbuf_size;
3168 }
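/*
 * For example, if the sub-buffers start at address A and subbuf_size is
 * 4096 (illustrative), a sub-buffer at A + 8192 maps back to index 2.
 */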
3169
3170 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
3171 struct buffer_page *next_page)
3172 {
3173 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3174 unsigned long old_head = (unsigned long)next_page->page;
3175 unsigned long new_head;
3176
3177 rb_inc_page(&next_page);
3178 new_head = (unsigned long)next_page->page;
3179
3180 /*
3181 * Only move it forward once. If something else came in and
3182 * moved it forward, then we don't want to touch it.
3183 */
3184 (void)cmpxchg(&meta->head_buffer, old_head, new_head);
3185 }
3186
3187 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
3188 struct buffer_page *reader)
3189 {
3190 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3191 void *old_reader = cpu_buffer->reader_page->page;
3192 void *new_reader = reader->page;
3193 int id;
3194
3195 id = reader->id;
3196 cpu_buffer->reader_page->id = id;
3197 reader->id = 0;
3198
3199 meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader);
3200 meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader);
3201
3202 /* The head pointer is the one after the reader */
3203 rb_update_meta_head(cpu_buffer, reader);
3204 }
3205
3206 /*
3207 * rb_handle_head_page - writer hit the head page
3208 *
3209 * Returns: +1 to retry page
3210 * 0 to continue
3211 * -1 on error
3212 */
3213 static int
3214 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
3215 struct buffer_page *tail_page,
3216 struct buffer_page *next_page)
3217 {
3218 struct buffer_page *new_head;
3219 int entries;
3220 int type;
3221 int ret;
3222
3223 entries = rb_page_entries(next_page);
3224
3225 /*
3226 * The hard part is here. We need to move the head
3227 * forward, and protect against both readers on
3228 * other CPUs and writers coming in via interrupts.
3229 */
3230 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
3231 RB_PAGE_HEAD);
3232
3233 /*
3234 * type can be one of four:
3235 * NORMAL - an interrupt already moved it for us
3236 * HEAD - we are the first to get here.
3237 * UPDATE - we are the interrupt interrupting
3238 * a current move.
3239 * MOVED - a reader on another CPU moved the next
3240 * pointer to its reader page. Give up
3241 * and try again.
3242 */
3243
3244 switch (type) {
3245 case RB_PAGE_HEAD:
3246 /*
3247 * We changed the head to UPDATE, thus
3248 * it is our responsibility to update
3249 * the counters.
3250 */
3251 local_add(entries, &cpu_buffer->overrun);
3252 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
3253 local_inc(&cpu_buffer->pages_lost);
3254
3255 if (cpu_buffer->ring_meta)
3256 rb_update_meta_head(cpu_buffer, next_page);
3257 /*
3258 * The entries will be zeroed out when we move the
3259 * tail page.
3260 */
3261
3262 /* still more to do */
3263 break;
3264
3265 case RB_PAGE_UPDATE:
3266 /*
3267 * This is an interrupt that interrupted the
3268 * previous update. Still more to do.
3269 */
3270 break;
3271 case RB_PAGE_NORMAL:
3272 /*
3273 * An interrupt came in before the update
3274 * and processed this for us.
3275 * Nothing left to do.
3276 */
3277 return 1;
3278 case RB_PAGE_MOVED:
3279 /*
3280 * The reader is on another CPU and just did
3281 * a swap with our next_page.
3282 * Try again.
3283 */
3284 return 1;
3285 default:
3286 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
3287 return -1;
3288 }
3289
3290 /*
3291 * Now that we are here, the old head pointer is
3292 * set to UPDATE. This will keep the reader from
3293 * swapping the head page with the reader page.
3294 * The reader (on another CPU) will spin till
3295 * we are finished.
3296 *
3297 * We just need to protect against interrupts
3298 * doing the job. We will set the next pointer
3299 * to HEAD. After that, we set the old pointer
3300 * to NORMAL, but only if it was HEAD before.
3301 * Otherwise we are an interrupt, and only
3302 * want the outer most commit to reset it.
3303 */
3304 new_head = next_page;
3305 rb_inc_page(&new_head);
3306
3307 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
3308 RB_PAGE_NORMAL);
3309
3310 /*
3311 * Valid returns are:
3312 * HEAD - an interrupt came in and already set it.
3313 * NORMAL - One of two things:
3314 * 1) We really set it.
3315 * 2) A bunch of interrupts came in and moved
3316 * the page forward again.
3317 */
3318 switch (ret) {
3319 case RB_PAGE_HEAD:
3320 case RB_PAGE_NORMAL:
3321 /* OK */
3322 break;
3323 default:
3324 RB_WARN_ON(cpu_buffer, 1);
3325 return -1;
3326 }
3327
3328 /*
3329 * It is possible that an interrupt came in,
3330 * set the head up, then more interrupts came in
3331 * and moved it again. When we get back here,
3332 * the page would have been set to NORMAL but we
3333 * just set it back to HEAD.
3334 *
3335 * How do you detect this? Well, if that happened
3336 * the tail page would have moved.
3337 */
3338 if (ret == RB_PAGE_NORMAL) {
3339 struct buffer_page *buffer_tail_page;
3340
3341 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
3342 /*
3343 * If the tail had moved past next, then we need
3344 * to reset the pointer.
3345 */
3346 if (buffer_tail_page != tail_page &&
3347 buffer_tail_page != next_page)
3348 rb_head_page_set_normal(cpu_buffer, new_head,
3349 next_page,
3350 RB_PAGE_HEAD);
3351 }
3352
3353 /*
3354 * If this was the outer most commit (the one that
3355 * changed the original pointer from HEAD to UPDATE),
3356 * then it is up to us to reset it to NORMAL.
3357 */
3358 if (type == RB_PAGE_HEAD) {
3359 ret = rb_head_page_set_normal(cpu_buffer, next_page,
3360 tail_page,
3361 RB_PAGE_UPDATE);
3362 if (RB_WARN_ON(cpu_buffer,
3363 ret != RB_PAGE_UPDATE))
3364 return -1;
3365 }
3366
3367 return 0;
3368 }
3369
3370 static inline void
3371 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
3372 unsigned long tail, struct rb_event_info *info)
3373 {
3374 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
3375 struct buffer_page *tail_page = info->tail_page;
3376 struct ring_buffer_event *event;
3377 unsigned long length = info->length;
3378
3379 /*
3380 * Only the event that crossed the page boundary
3381 * must fill the old tail_page with padding.
3382 */
3383 if (tail >= bsize) {
3384 /*
3385 * If the page was filled, then we still need
3386 * to update the real_end. Reset it to zero
3387 * and the reader will ignore it.
3388 */
3389 if (tail == bsize)
3390 tail_page->real_end = 0;
3391
3392 local_sub(length, &tail_page->write);
3393 return;
3394 }
3395
3396 event = __rb_page_index(tail_page, tail);
3397
3398 /*
3399 * Save the original length to the meta data.
3400 * This will be used by the reader to add lost event
3401 * counter.
3402 */
3403 tail_page->real_end = tail;
3404
3405 /*
3406 * If this event is bigger than the minimum size, then
3407 * we need to be careful that we don't subtract the
3408 * write counter enough to allow another writer to slip
3409 * in on this page.
3410 * We put in a discarded commit instead, to make sure
3411 * that this space is not used again, and this space will
3412 * not be accounted into 'entries_bytes'.
3413 *
3414 * If we are less than the minimum size, we don't need to
3415 * worry about it.
3416 */
3417 if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
3418 /* No room for any events */
3419
3420 /* Mark the rest of the page with padding */
3421 rb_event_set_padding(event);
3422
3423 /* Make sure the padding is visible before the write update */
3424 smp_wmb();
3425
3426 /* Set the write back to the previous setting */
3427 local_sub(length, &tail_page->write);
3428 return;
3429 }
3430
3431 /* Put in a discarded event */
3432 event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
3433 event->type_len = RINGBUF_TYPE_PADDING;
3434 /* time delta must be non zero */
3435 event->time_delta = 1;
3436
3437 /* account for padding bytes */
3438 local_add(bsize - tail, &cpu_buffer->entries_bytes);
3439
3440 /* Make sure the padding is visible before the tail_page->write update */
3441 smp_wmb();
3442
3443 /* Set write to end of buffer */
3444 length = (tail + length) - bsize;
3445 local_sub(length, &tail_page->write);
3446 }
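/*
 * For example (assuming the 4 byte event header): with bsize = 4096,
 * tail = 4000 and length = 112, the 96 tail bytes become a PADDING event
 * with array[0] = 96 - 4 = 92, and write is pulled back by
 * (4000 + 112) - 4096 = 16 so that it lands exactly on the page end.
 */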
3447
3448 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3449
3450 /*
3451 * This is the slow path, force gcc not to inline it.
3452 */
3453 static noinline struct ring_buffer_event *
3454 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
3455 unsigned long tail, struct rb_event_info *info)
3456 {
3457 struct buffer_page *tail_page = info->tail_page;
3458 struct buffer_page *commit_page = cpu_buffer->commit_page;
3459 struct trace_buffer *buffer = cpu_buffer->buffer;
3460 struct buffer_page *next_page;
3461 int ret;
3462
3463 next_page = tail_page;
3464
3465 rb_inc_page(&next_page);
3466
3467 /*
3468 * If for some reason, we had an interrupt storm that made
3469 * it all the way around the buffer, bail, and warn
3470 * about it.
3471 */
3472 if (unlikely(next_page == commit_page)) {
3473 local_inc(&cpu_buffer->commit_overrun);
3474 goto out_reset;
3475 }
3476
3477 /*
3478 * This is where the fun begins!
3479 *
3480 * We are fighting against races between a reader that
3481 * could be on another CPU trying to swap its reader
3482 * page with the buffer head.
3483 *
3484 * We are also fighting against interrupts coming in and
3485 * moving the head or tail on us as well.
3486 *
3487 * If the next page is the head page then we have filled
3488 * the buffer, unless the commit page is still on the
3489 * reader page.
3490 */
3491 if (rb_is_head_page(next_page, &tail_page->list)) {
3492
3493 /*
3494 * If the commit is not on the reader page, then
3495 * move the head page.
3496 */
3497 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
3498 /*
3499 * If we are not in overwrite mode,
3500 * this is easy, just stop here.
3501 */
3502 if (!(buffer->flags & RB_FL_OVERWRITE)) {
3503 local_inc(&cpu_buffer->dropped_events);
3504 goto out_reset;
3505 }
3506
3507 ret = rb_handle_head_page(cpu_buffer,
3508 tail_page,
3509 next_page);
3510 if (ret < 0)
3511 goto out_reset;
3512 if (ret)
3513 goto out_again;
3514 } else {
3515 /*
3516 * We need to be careful here too. The
3517 * commit page could still be on the reader
3518 * page. We could have a small buffer, and
3519 * have filled up the buffer with events
3520 * from interrupts and such, and wrapped.
3521 *
3522 * Note, if the tail page is also on the
3523 * reader_page, we let it move out.
3524 */
3525 if (unlikely((cpu_buffer->commit_page !=
3526 cpu_buffer->tail_page) &&
3527 (cpu_buffer->commit_page ==
3528 cpu_buffer->reader_page))) {
3529 local_inc(&cpu_buffer->commit_overrun);
3530 goto out_reset;
3531 }
3532 }
3533 }
3534
3535 rb_tail_page_update(cpu_buffer, tail_page, next_page);
3536
3537 out_again:
3538
3539 rb_reset_tail(cpu_buffer, tail, info);
3540
3541 /* Commit what we have for now. */
3542 rb_end_commit(cpu_buffer);
3543 /* rb_end_commit() decs committing */
3544 local_inc(&cpu_buffer->committing);
3545
3546 /* fail and let the caller try again */
3547 return ERR_PTR(-EAGAIN);
3548
3549 out_reset:
3550 /* reset write */
3551 rb_reset_tail(cpu_buffer, tail, info);
3552
3553 return NULL;
3554 }
3555
3556 /* Slow path */
3557 static struct ring_buffer_event *
3558 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3559 struct ring_buffer_event *event, u64 delta, bool abs)
3560 {
3561 if (abs)
3562 event->type_len = RINGBUF_TYPE_TIME_STAMP;
3563 else
3564 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
3565
3566 /* Not the first event on the page, or not delta? */
3567 if (abs || rb_event_index(cpu_buffer, event)) {
3568 event->time_delta = delta & TS_MASK;
3569 event->array[0] = delta >> TS_SHIFT;
3570 } else {
3571 /* nope, just zero it */
3572 event->time_delta = 0;
3573 event->array[0] = 0;
3574 }
3575
3576 return skip_time_extend(event);
3577 }
3578
3579 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
3580 static inline bool sched_clock_stable(void)
3581 {
3582 return true;
3583 }
3584 #endif
3585
3586 static void
3587 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
3588 struct rb_event_info *info)
3589 {
3590 u64 write_stamp;
3591
3592 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
3593 (unsigned long long)info->delta,
3594 (unsigned long long)info->ts,
3595 (unsigned long long)info->before,
3596 (unsigned long long)info->after,
3597 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
3598 sched_clock_stable() ? "" :
3599 "If you just came from a suspend/resume,\n"
3600 "please switch to the trace global clock:\n"
3601 " echo global > /sys/kernel/tracing/trace_clock\n"
3602 "or add trace_clock=global to the kernel command line\n");
3603 }
3604
3605 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
3606 struct ring_buffer_event **event,
3607 struct rb_event_info *info,
3608 u64 *delta,
3609 unsigned int *length)
3610 {
3611 bool abs = info->add_timestamp &
3612 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
3613
3614 if (unlikely(info->delta > (1ULL << 59))) {
3615 /*
3616 * Some timers can use more than 59 bits, and when a timestamp
3617 * is added to the buffer, it will lose those bits.
3618 */
3619 if (abs && (info->ts & TS_MSB)) {
3620 info->delta &= ABS_TS_MASK;
3621
3622 /* did the clock go backwards */
3623 } else if (info->before == info->after && info->before > info->ts) {
3624 /* not interrupted */
3625 static int once;
3626
3627 /*
3628 * This is possible with a recalibration of the TSC.
3629 * Do not produce a call stack, but just report it.
3630 */
3631 if (!once) {
3632 once++;
3633 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
3634 info->before, info->ts);
3635 }
3636 } else
3637 rb_check_timestamp(cpu_buffer, info);
3638 if (!abs)
3639 info->delta = 0;
3640 }
3641 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
3642 *length -= RB_LEN_TIME_EXTEND;
3643 *delta = 0;
3644 }
3645
3646 /**
3647 * rb_update_event - update event type and data
3648 * @cpu_buffer: The per cpu buffer of the @event
3649 * @event: the event to update
3650 * @info: The info to update the @event with (contains length and delta)
3651 *
3652 * Update the type and data fields of the @event. The length
3653 * is the actual size that is written to the ring buffer,
3654 * and with this, we can determine what to place into the
3655 * data field.
3656 */
3657 static void
3658 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
3659 struct ring_buffer_event *event,
3660 struct rb_event_info *info)
3661 {
3662 unsigned length = info->length;
3663 u64 delta = info->delta;
3664 unsigned int nest = local_read(&cpu_buffer->committing) - 1;
3665
3666 if (!WARN_ON_ONCE(nest >= MAX_NEST))
3667 cpu_buffer->event_stamp[nest] = info->ts;
3668
3669 /*
3670 * If we need to add a timestamp, then we
3671 * add it to the start of the reserved space.
3672 */
3673 if (unlikely(info->add_timestamp))
3674 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
3675
3676 event->time_delta = delta;
3677 length -= RB_EVNT_HDR_SIZE;
3678 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
3679 event->type_len = 0;
3680 event->array[0] = length;
3681 } else
3682 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
3683 }
3684
3685 static unsigned rb_calculate_event_length(unsigned length)
3686 {
3687 struct ring_buffer_event event; /* Used only for sizeof array */
3688
3689 /* zero length can cause confusion */
3690 if (!length)
3691 length++;
3692
3693 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
3694 length += sizeof(event.array[0]);
3695
3696 length += RB_EVNT_HDR_SIZE;
3697 length = ALIGN(length, RB_ARCH_ALIGNMENT);
3698
3699 /*
3700 * In case the time delta is larger than the 27 bits for it
3701 * in the header, we need to add a timestamp. If another
3702 * event comes in when trying to discard this one to increase
3703 * the length, then the timestamp will be added in the allocated
3704 * space of this event. If length is bigger than the size needed
3705 * for the TIME_EXTEND, then padding has to be used. The events
3706 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
3707 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
3708 * As length is a multiple of 4, we only need to worry if it
3709 * is 12 (RB_LEN_TIME_EXTEND + 4).
3710 */
3711 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
3712 length += RB_ALIGNMENT;
3713
3714 return length;
3715 }
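
/*
 * Worked example for the sizing above (illustrative only, assuming
 * RB_EVNT_HDR_SIZE == 4 and a 4 byte RB_ARCH_ALIGNMENT): a 5 byte
 * payload becomes 5 + 4 = 9 bytes, aligned up to 12. Since
 * 12 == RB_LEN_TIME_EXTEND + 4, it is bumped to 16, so that a later
 * discard can always replace the event with either a TIME_EXTEND
 * or a valid padding event.
 */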
3716
3717 static inline bool
3718 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
3719 struct ring_buffer_event *event)
3720 {
3721 unsigned long new_index, old_index;
3722 struct buffer_page *bpage;
3723 unsigned long addr;
3724
3725 new_index = rb_event_index(cpu_buffer, event);
3726 old_index = new_index + rb_event_ts_length(event);
3727 addr = (unsigned long)event;
3728 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3729
3730 bpage = READ_ONCE(cpu_buffer->tail_page);
3731
3732 /*
3733 * Make sure the tail_page is still the same and
3734 * the next write location is the end of this event
3735 */
3736 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3737 unsigned long write_mask =
3738 local_read(&bpage->write) & ~RB_WRITE_MASK;
3739 unsigned long event_length = rb_event_length(event);
3740
3741 /*
3742 * Make the before_stamp different than the write_stamp
3743 * so that the next event adds an absolute
3744 * value and does not rely on the saved write stamp, which
3745 * is now going to be bogus.
3746 *
3747 * By setting the before_stamp to zero, the next event
3748 * is not going to use the write_stamp and will instead
3749 * create an absolute timestamp. This means there's no
3750 * reason to update the write_stamp!
3751 */
3752 rb_time_set(&cpu_buffer->before_stamp, 0);
3753
3754 /*
3755 * If an event were to come in now, it would see that the
3756 * write_stamp and the before_stamp are different, and assume
3757 * that this event just added itself before updating
3758 * the write stamp. The interrupting event will fix the
3759 * write stamp for us, and use an absolute timestamp.
3760 */
3761
3762 /*
3763 * This is on the tail page. It is possible that
3764 * a write could come in and move the tail page
3765 * and write to the next page. That is fine
3766 * because we just shorten what is on this page.
3767 */
3768 old_index += write_mask;
3769 new_index += write_mask;
3770
3771 /* caution: old_index gets updated on cmpxchg failure */
3772 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
3773 /* update counters */
3774 local_sub(event_length, &cpu_buffer->entries_bytes);
3775 return true;
3776 }
3777 }
3778
3779 /* could not discard */
3780 return false;
3781 }
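
/*
 * Illustrative sketch of the fast path above (made-up offsets): if the
 * discarded event occupies bytes 96..128 of the tail page and nothing
 * was written after it (the page write index is still 128), a
 * successful cmpxchg of the write index from 128 back to 96 rewinds
 * the page and the space is reused. If another writer slipped in
 * first, the cmpxchg fails and the already-discarded event simply
 * stays in the buffer as padding.
 */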
3782
3783 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3784 {
3785 local_inc(&cpu_buffer->committing);
3786 local_inc(&cpu_buffer->commits);
3787 }
3788
3789 static __always_inline void
3790 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3791 {
3792 unsigned long max_count;
3793
3794 /*
3795 * We only race with interrupts and NMIs on this CPU.
3796 * If we own the commit event, then we can commit
3797 * all others that interrupted us, since the interruptions
3798 * are in stack format (they finish before they come
3799 * back to us). This allows us to do a simple loop to
3800 * assign the commit to the tail.
3801 */
3802 again:
3803 max_count = cpu_buffer->nr_pages * 100;
3804
3805 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
3806 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
3807 return;
3808 if (RB_WARN_ON(cpu_buffer,
3809 rb_is_reader_page(cpu_buffer->tail_page)))
3810 return;
3811 /*
3812 * No need for a memory barrier here, as the update
3813 * of the tail_page did it for this page.
3814 */
3815 local_set(&cpu_buffer->commit_page->page->commit,
3816 rb_page_write(cpu_buffer->commit_page));
3817 rb_inc_page(&cpu_buffer->commit_page);
3818 if (cpu_buffer->ring_meta) {
3819 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3820 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page;
3821 }
3822 /* add barrier to keep gcc from optimizing too much */
3823 barrier();
3824 }
3825 while (rb_commit_index(cpu_buffer) !=
3826 rb_page_write(cpu_buffer->commit_page)) {
3827
3828 /* Make sure the readers see the content of what is committed. */
3829 smp_wmb();
3830 local_set(&cpu_buffer->commit_page->page->commit,
3831 rb_page_write(cpu_buffer->commit_page));
3832 RB_WARN_ON(cpu_buffer,
3833 local_read(&cpu_buffer->commit_page->page->commit) &
3834 ~RB_WRITE_MASK);
3835 barrier();
3836 }
3837
3838 /* again, keep gcc from optimizing */
3839 barrier();
3840
3841 /*
3842 * If an interrupt came in just after the first while loop
3843 * and pushed the tail page forward, we will be left with
3844 * a dangling commit that will never go forward.
3845 */
3846 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3847 goto again;
3848 }
3849
3850 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3851 {
3852 unsigned long commits;
3853
3854 if (RB_WARN_ON(cpu_buffer,
3855 !local_read(&cpu_buffer->committing)))
3856 return;
3857
3858 again:
3859 commits = local_read(&cpu_buffer->commits);
3860 /* synchronize with interrupts */
3861 barrier();
3862 if (local_read(&cpu_buffer->committing) == 1)
3863 rb_set_commit_to_write(cpu_buffer);
3864
3865 local_dec(&cpu_buffer->committing);
3866
3867 /* synchronize with interrupts */
3868 barrier();
3869
3870 /*
3871 * Need to account for interrupts coming in between the
3872 * updating of the commit page and the clearing of the
3873 * committing counter.
3874 */
3875 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3876 !local_read(&cpu_buffer->committing)) {
3877 local_inc(&cpu_buffer->committing);
3878 goto again;
3879 }
3880 }
3881
3882 static inline void rb_event_discard(struct ring_buffer_event *event)
3883 {
3884 if (extended_time(event))
3885 event = skip_time_extend(event);
3886
3887 /* array[0] holds the actual length for the discarded event */
3888 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3889 event->type_len = RINGBUF_TYPE_PADDING;
3890 /* time delta must be non zero */
3891 if (!event->time_delta)
3892 event->time_delta = 1;
3893 }
3894
3895 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3896 {
3897 local_inc(&cpu_buffer->entries);
3898 rb_end_commit(cpu_buffer);
3899 }
3900
3901 static __always_inline void
3902 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3903 {
3904 if (buffer->irq_work.waiters_pending) {
3905 buffer->irq_work.waiters_pending = false;
3906 /* irq_work_queue() supplies its own memory barriers */
3907 irq_work_queue(&buffer->irq_work.work);
3908 }
3909
3910 if (cpu_buffer->irq_work.waiters_pending) {
3911 cpu_buffer->irq_work.waiters_pending = false;
3912 /* irq_work_queue() supplies its own memory barriers */
3913 irq_work_queue(&cpu_buffer->irq_work.work);
3914 }
3915
3916 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3917 return;
3918
3919 if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3920 return;
3921
3922 if (!cpu_buffer->irq_work.full_waiters_pending)
3923 return;
3924
3925 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3926
3927 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3928 return;
3929
3930 cpu_buffer->irq_work.wakeup_full = true;
3931 cpu_buffer->irq_work.full_waiters_pending = false;
3932 /* irq_work_queue() supplies its own memory barriers */
3933 irq_work_queue(&cpu_buffer->irq_work.work);
3934 }
3935
3936 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3937 # define do_ring_buffer_record_recursion() \
3938 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3939 #else
3940 # define do_ring_buffer_record_recursion() do { } while (0)
3941 #endif
3942
3943 /*
3944 * The lock and unlock are done within a preempt disable section.
3945 * The current_context per_cpu variable can only be modified
3946 * by the current task between lock and unlock. But it can
3947 * be modified more than once via an interrupt. To pass this
3948 * information from the lock to the unlock without having to
3949 * access the 'in_interrupt()' functions again (which do show
3950 * a bit of overhead in something as critical as function tracing),
3951 * we use a bitmask trick.
3952 *
3953 * bit 1 = NMI context
3954 * bit 2 = IRQ context
3955 * bit 3 = SoftIRQ context
3956 * bit 4 = normal context.
3957 *
3958 * This works because this is the order of contexts that can
3959 * preempt other contexts. A SoftIRQ never preempts an IRQ
3960 * context.
3961 *
3962 * When the context is determined, the corresponding bit is
3963 * checked and set (if it was set, then a recursion of that context
3964 * happened).
3965 *
3966 * On unlock, we need to clear this bit. To do so, just subtract
3967 * 1 from the current_context and AND it to itself.
3968 *
3969 * (binary)
3970 * 101 - 1 = 100
3971 * 101 & 100 = 100 (clearing bit zero)
3972 *
3973 * 1010 - 1 = 1001
3974 * 1010 & 1001 = 1000 (clearing bit 1)
3975 *
3976 * The least significant bit can be cleared this way, and it
3977 * just so happens that it is the same bit corresponding to
3978 * the current context.
3979 *
3980 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3981 * is set when a recursion is detected at the current context, and if
3982 * the TRANSITION bit is already set, it will fail the recursion.
3983 * This is needed because there's a lag between the changing of
3984 * interrupt context and updating the preempt count. In this case,
3985 * a false positive will be found. To handle this, one extra recursion
3986 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3987 * bit is already set, then it is considered a recursion and the function
3988 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3989 *
3990 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3991 * to be cleared. Even if it wasn't the context that set it. That is,
3992 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3993 * is called before preempt_count() is updated, since the check will
3994 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3995 * NMI then comes in, it will set the NMI bit, but when the NMI code
3996 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3997 * and leave the NMI bit set. But this is fine, because the interrupt
3998 * code that set the TRANSITION bit will then clear the NMI bit when it
3999 * calls trace_recursive_unlock(). If another NMI comes in, it will
4000 * set the TRANSITION bit and continue.
4001 *
4002 * Note: The TRANSITION bit only handles a single transition between contexts.
4003 */
4004
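/*
 * A minimal sketch of the unlock trick described above (illustrative
 * only, ignoring the nest shift). With nest == 0, the unlock below is
 * simply:
 *
 *	val = val & (val - 1);
 *
 * which clears the least significant set bit, and that is always the
 * bit of the most recently entered context. E.g. with the normal bit
 * (bit 4) and the NMI bit (bit 1) set:
 *
 *	10010 - 1 = 10001
 *	10010 & 10001 = 10000 (the NMI bit is cleared, normal remains)
 */
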
4005 static __always_inline bool
4006 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
4007 {
4008 unsigned int val = cpu_buffer->current_context;
4009 int bit = interrupt_context_level();
4010
4011 bit = RB_CTX_NORMAL - bit;
4012
4013 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
4014 /*
4015 * It is possible that this was called by transitioning
4016 * between interrupt context, and preempt_count() has not
4017 * been updated yet. In this case, use the TRANSITION bit.
4018 */
4019 bit = RB_CTX_TRANSITION;
4020 if (val & (1 << (bit + cpu_buffer->nest))) {
4021 do_ring_buffer_record_recursion();
4022 return true;
4023 }
4024 }
4025
4026 val |= (1 << (bit + cpu_buffer->nest));
4027 cpu_buffer->current_context = val;
4028
4029 return false;
4030 }
4031
4032 static __always_inline void
4033 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
4034 {
4035 cpu_buffer->current_context &=
4036 cpu_buffer->current_context - (1 << cpu_buffer->nest);
4037 }
4038
4039 /* The recursive locking above uses 5 bits */
4040 #define NESTED_BITS 5
4041
4042 /**
4043 * ring_buffer_nest_start - Allow tracing while nested
4044 * @buffer: The ring buffer to modify
4045 *
4046 * The ring buffer has a safety mechanism to prevent recursion.
4047 * But there may be a case where a trace needs to be done while
4048 * tracing something else. In this case, calling this function
4049 * allows the next ring_buffer_lock_reserve() call to nest within
4050 * a currently active one.
4051 *
4052 * Call this function before calling another ring_buffer_lock_reserve() and
4053 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
4054 */
4055 void ring_buffer_nest_start(struct trace_buffer *buffer)
4056 {
4057 struct ring_buffer_per_cpu *cpu_buffer;
4058 int cpu;
4059
4060 /* Enabled by ring_buffer_nest_end() */
4061 preempt_disable_notrace();
4062 cpu = raw_smp_processor_id();
4063 cpu_buffer = buffer->buffers[cpu];
4064 /* This is the shift value for the above recursive locking */
4065 cpu_buffer->nest += NESTED_BITS;
4066 }
4067
4068 /**
4069 * ring_buffer_nest_end - End allowing tracing while nested
4070 * @buffer: The ring buffer to modify
4071 *
4072 * Must be called after ring_buffer_nest_start() and after the
4073 * ring_buffer_unlock_commit().
4074 */
4075 void ring_buffer_nest_end(struct trace_buffer *buffer)
4076 {
4077 struct ring_buffer_per_cpu *cpu_buffer;
4078 int cpu;
4079
4080 /* disabled by ring_buffer_nest_start() */
4081 cpu = raw_smp_processor_id();
4082 cpu_buffer = buffer->buffers[cpu];
4083 /* This is the shift value for the above recursive locking */
4084 cpu_buffer->nest -= NESTED_BITS;
4085 preempt_enable_notrace();
4086 }
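
/*
 * Usage sketch for the nesting API above (illustrative only; the
 * u32 payload is made up):
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, sizeof(u32));
 *	if (event) {
 *		*(u32 *)ring_buffer_event_data(event) = value;
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);
 */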
4087
4088 /**
4089 * ring_buffer_unlock_commit - commit a reserved event
4090 * @buffer: The buffer to commit to
4091 *
4092 * This commits the data to the ring buffer, and releases any locks held.
4093 *
4094 * Must be paired with ring_buffer_lock_reserve.
4095 */
4096 int ring_buffer_unlock_commit(struct trace_buffer *buffer)
4097 {
4098 struct ring_buffer_per_cpu *cpu_buffer;
4099 int cpu = raw_smp_processor_id();
4100
4101 cpu_buffer = buffer->buffers[cpu];
4102
4103 rb_commit(cpu_buffer);
4104
4105 rb_wakeups(buffer, cpu_buffer);
4106
4107 trace_recursive_unlock(cpu_buffer);
4108
4109 preempt_enable_notrace();
4110
4111 return 0;
4112 }
4113 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
4114
4115 /* Special value to validate all deltas on a page. */
4116 #define CHECK_FULL_PAGE 1L
4117
4118 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
4119
4120 static const char *show_irq_str(int bits)
4121 {
4122 const char *type[] = {
4123 ".", // 0
4124 "s", // 1
4125 "h", // 2
4126 "Hs", // 3
4127 "n", // 4
4128 "Ns", // 5
4129 "Nh", // 6
4130 "NHs", // 7
4131 };
4132
4133 return type[bits];
4134 }
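
/*
 * Example, from the table above: an event written in hard interrupt
 * context while a soft interrupt was pending has bits == 3 and is
 * shown as "Hs"; with an NMI on top of both, bits == 7 gives "NHs".
 */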
4135
4136 /* Assume this is a trace event */
4137 static const char *show_flags(struct ring_buffer_event *event)
4138 {
4139 struct trace_entry *entry;
4140 int bits = 0;
4141
4142 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4143 return "X";
4144
4145 entry = ring_buffer_event_data(event);
4146
4147 if (entry->flags & TRACE_FLAG_SOFTIRQ)
4148 bits |= 1;
4149
4150 if (entry->flags & TRACE_FLAG_HARDIRQ)
4151 bits |= 2;
4152
4153 if (entry->flags & TRACE_FLAG_NMI)
4154 bits |= 4;
4155
4156 return show_irq_str(bits);
4157 }
4158
4159 static const char *show_irq(struct ring_buffer_event *event)
4160 {
4161 struct trace_entry *entry;
4162
4163 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4164 return "";
4165
4166 entry = ring_buffer_event_data(event);
4167 if (entry->flags & TRACE_FLAG_IRQS_OFF)
4168 return "d";
4169 return "";
4170 }
4171
4172 static const char *show_interrupt_level(void)
4173 {
4174 unsigned long pc = preempt_count();
4175 unsigned char level = 0;
4176
4177 if (pc & SOFTIRQ_OFFSET)
4178 level |= 1;
4179
4180 if (pc & HARDIRQ_MASK)
4181 level |= 2;
4182
4183 if (pc & NMI_MASK)
4184 level |= 4;
4185
4186 return show_irq_str(level);
4187 }
4188
4189 static void dump_buffer_page(struct buffer_data_page *bpage,
4190 struct rb_event_info *info,
4191 unsigned long tail)
4192 {
4193 struct ring_buffer_event *event;
4194 u64 ts, delta;
4195 int e;
4196
4197 ts = bpage->time_stamp;
4198 pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
4199
4200 for (e = 0; e < tail; e += rb_event_length(event)) {
4201
4202 event = (struct ring_buffer_event *)(bpage->data + e);
4203
4204 switch (event->type_len) {
4205
4206 case RINGBUF_TYPE_TIME_EXTEND:
4207 delta = rb_event_time_stamp(event);
4208 ts += delta;
4209 pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
4210 e, ts, delta);
4211 break;
4212
4213 case RINGBUF_TYPE_TIME_STAMP:
4214 delta = rb_event_time_stamp(event);
4215 ts = rb_fix_abs_ts(delta, ts);
4216 pr_warn(" 0x%x: [%lld] absolute:%lld TIME STAMP\n",
4217 e, ts, delta);
4218 break;
4219
4220 case RINGBUF_TYPE_PADDING:
4221 ts += event->time_delta;
4222 pr_warn(" 0x%x: [%lld] delta:%d PADDING\n",
4223 e, ts, event->time_delta);
4224 break;
4225
4226 case RINGBUF_TYPE_DATA:
4227 ts += event->time_delta;
4228 pr_warn(" 0x%x: [%lld] delta:%d %s%s\n",
4229 e, ts, event->time_delta,
4230 show_flags(event), show_irq(event));
4231 break;
4232
4233 default:
4234 break;
4235 }
4236 }
4237 pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
4238 }
4239
4240 static DEFINE_PER_CPU(atomic_t, checking);
4241 static atomic_t ts_dump;
4242
4243 #define buffer_warn_return(fmt, ...) \
4244 do { \
4245 /* If another report is happening, ignore this one */ \
4246 if (atomic_inc_return(&ts_dump) != 1) { \
4247 atomic_dec(&ts_dump); \
4248 goto out; \
4249 } \
4250 atomic_inc(&cpu_buffer->record_disabled); \
4251 pr_warn(fmt, ##__VA_ARGS__); \
4252 dump_buffer_page(bpage, info, tail); \
4253 atomic_dec(&ts_dump); \
4254 /* There are some cases during boot up where this can happen */ \
4255 if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING)) \
4256 /* Do not re-enable checking */ \
4257 return; \
4258 } while (0)
4259
4260 /*
4261 * Check if the current event time stamp matches the deltas on
4262 * the buffer page.
4263 */
4264 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
4265 struct rb_event_info *info,
4266 unsigned long tail)
4267 {
4268 struct buffer_data_page *bpage;
4269 u64 ts, delta;
4270 bool full = false;
4271 int ret;
4272
4273 bpage = info->tail_page->page;
4274
4275 if (tail == CHECK_FULL_PAGE) {
4276 full = true;
4277 tail = local_read(&bpage->commit);
4278 } else if (info->add_timestamp &
4279 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
4280 /* Ignore events with absolute time stamps */
4281 return;
4282 }
4283
4284 /*
4285 * Do not check the first event (skip possible extends too).
4286 * Also do not check if previous events have not been committed.
4287 */
4288 if (tail <= 8 || tail > local_read(&bpage->commit))
4289 return;
4290
4291 /*
4292 * If this check interrupted another check on this CPU, skip it.
4293 */
4294 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
4295 goto out;
4296
4297 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
4298 if (ret < 0) {
4299 if (delta < ts) {
4300 buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
4301 cpu_buffer->cpu, ts, delta);
4302 goto out;
4303 }
4304 }
4305 if ((full && ts > info->ts) ||
4306 (!full && ts + info->delta != info->ts)) {
4307 buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
4308 cpu_buffer->cpu,
4309 ts + info->delta, info->ts, info->delta,
4310 info->before, info->after,
4311 full ? " (full)" : "", show_interrupt_level());
4312 }
4313 out:
4314 atomic_dec(this_cpu_ptr(&checking));
4315 }
4316 #else
4317 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
4318 struct rb_event_info *info,
4319 unsigned long tail)
4320 {
4321 }
4322 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
4323
4324 static struct ring_buffer_event *
4325 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
4326 struct rb_event_info *info)
4327 {
4328 struct ring_buffer_event *event;
4329 struct buffer_page *tail_page;
4330 unsigned long tail, write, w;
4331
4332 /* Don't let the compiler play games with cpu_buffer->tail_page */
4333 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
4334
4335 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
4336 barrier();
4337 rb_time_read(&cpu_buffer->before_stamp, &info->before);
4338 rb_time_read(&cpu_buffer->write_stamp, &info->after);
4339 barrier();
4340 info->ts = rb_time_stamp(cpu_buffer->buffer);
4341
4342 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
4343 info->delta = info->ts;
4344 } else {
4345 /*
4346 * If interrupting an event time update, we may need an
4347 * absolute timestamp.
4348 * Don't bother if this is the start of a new page (w == 0).
4349 */
4350 if (!w) {
4351 /* Use the sub-buffer timestamp */
4352 info->delta = 0;
4353 } else if (unlikely(info->before != info->after)) {
4354 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
4355 info->length += RB_LEN_TIME_EXTEND;
4356 } else {
4357 info->delta = info->ts - info->after;
4358 if (unlikely(test_time_stamp(info->delta))) {
4359 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
4360 info->length += RB_LEN_TIME_EXTEND;
4361 }
4362 }
4363 }
4364
4365 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
4366
4367 /*C*/ write = local_add_return(info->length, &tail_page->write);
4368
4369 /* set write to only the index of the write */
4370 write &= RB_WRITE_MASK;
4371
4372 tail = write - info->length;
4373
4374 /* See if we shot past the end of this buffer page */
4375 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
4376 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
4377 return rb_move_tail(cpu_buffer, tail, info);
4378 }
4379
4380 if (likely(tail == w)) {
4381 /* Nothing interrupted us between A and C */
4382 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
4383 /*
4384 * If something came in between C and D, the write stamp
4385 * may now not be in sync. But that's fine as the before_stamp
4386 * will be different and then the next event will just be forced
4387 * to use an absolute timestamp.
4388 */
4389 if (likely(!(info->add_timestamp &
4390 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4391 /* This did not interrupt any time update */
4392 info->delta = info->ts - info->after;
4393 else
4394 /* Just use full timestamp for interrupting event */
4395 info->delta = info->ts;
4396 check_buffer(cpu_buffer, info, tail);
4397 } else {
4398 u64 ts;
4399 /* SLOW PATH - Interrupted between A and C */
4400
4401 /* Save the old before_stamp */
4402 rb_time_read(&cpu_buffer->before_stamp, &info->before);
4403
4404 /*
4405 * Read a new timestamp and update the before_stamp to make
4406 * the next event after this one force using an absolute
4407 * timestamp. This is in case an interrupt were to come in
4408 * between E and F.
4409 */
4410 ts = rb_time_stamp(cpu_buffer->buffer);
4411 rb_time_set(&cpu_buffer->before_stamp, ts);
4412
4413 barrier();
4414 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after);
4415 barrier();
4416 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
4417 info->after == info->before && info->after < ts) {
4418 /*
4419 * Nothing came after this event between C and F, it is
4420 * safe to use info->after for the delta as it
4421 * matched info->before and is still valid.
4422 */
4423 info->delta = ts - info->after;
4424 } else {
4425 /*
4426 * Interrupted between C and F:
4427 * Lost the previous events time stamp. Just set the
4428 * delta to zero, and this will be the same time as
4429 * the event this event interrupted. And the events that
4430 * came after this will still be correct (as they would
4431 * have built their delta on the previous event).
4432 */
4433 info->delta = 0;
4434 }
4435 info->ts = ts;
4436 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
4437 }
4438
4439 /*
4440 * If this is the first commit on the page, then it has the same
4441 * timestamp as the page itself.
4442 */
4443 if (unlikely(!tail && !(info->add_timestamp &
4444 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4445 info->delta = 0;
4446
4447 /* We reserved something on the buffer */
4448
4449 event = __rb_page_index(tail_page, tail);
4450 rb_update_event(cpu_buffer, event, info);
4451
4452 local_inc(&tail_page->entries);
4453
4454 /*
4455 * If this is the first commit on the page, then update
4456 * its timestamp.
4457 */
4458 if (unlikely(!tail))
4459 tail_page->page->time_stamp = info->ts;
4460
4461 /* account for these added bytes */
4462 local_add(info->length, &cpu_buffer->entries_bytes);
4463
4464 return event;
4465 }
4466
4467 static __always_inline struct ring_buffer_event *
4468 rb_reserve_next_event(struct trace_buffer *buffer,
4469 struct ring_buffer_per_cpu *cpu_buffer,
4470 unsigned long length)
4471 {
4472 struct ring_buffer_event *event;
4473 struct rb_event_info info;
4474 int nr_loops = 0;
4475 int add_ts_default;
4476
4477 /*
4478 * The ring buffer does cmpxchg as well as atomic64 operations
4479 * (for which some archs use locking); make sure this
4480 * is safe in NMI context.
4481 */
4482 if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
4483 IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
4484 (unlikely(in_nmi()))) {
4485 return NULL;
4486 }
4487
4488 rb_start_commit(cpu_buffer);
4489 /* The commit page can not change after this */
4490
4491 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4492 /*
4493 * Due to the ability to swap a cpu buffer from a buffer
4494 * it is possible it was swapped before we committed.
4495 * (committing stops a swap). We check for it here and
4496 * if it happened, we have to fail the write.
4497 */
4498 barrier();
4499 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
4500 local_dec(&cpu_buffer->committing);
4501 local_dec(&cpu_buffer->commits);
4502 return NULL;
4503 }
4504 #endif
4505
4506 info.length = rb_calculate_event_length(length);
4507
4508 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
4509 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
4510 info.length += RB_LEN_TIME_EXTEND;
4511 if (info.length > cpu_buffer->buffer->max_data_size)
4512 goto out_fail;
4513 } else {
4514 add_ts_default = RB_ADD_STAMP_NONE;
4515 }
4516
4517 again:
4518 info.add_timestamp = add_ts_default;
4519 info.delta = 0;
4520
4521 /*
4522 * We allow for interrupts to reenter here and do a trace.
4523 * If one does, it will cause this original code to loop
4524 * back here. Even with heavy interrupts happening, this
4525 * should only happen a few times in a row. If this happens
4526 * 1000 times in a row, there must be either an interrupt
4527 * storm or we have something buggy.
4528 * Bail!
4529 */
4530 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
4531 goto out_fail;
4532
4533 event = __rb_reserve_next(cpu_buffer, &info);
4534
4535 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
4536 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
4537 info.length -= RB_LEN_TIME_EXTEND;
4538 goto again;
4539 }
4540
4541 if (likely(event))
4542 return event;
4543 out_fail:
4544 rb_end_commit(cpu_buffer);
4545 return NULL;
4546 }
4547
4548 /**
4549 * ring_buffer_lock_reserve - reserve a part of the buffer
4550 * @buffer: the ring buffer to reserve from
4551 * @length: the length of the data to reserve (excluding event header)
4552 *
4553 * Returns a reserved event on the ring buffer to copy directly to.
4554 * The user of this interface will need to get the body to write into
4555 * and can use the ring_buffer_event_data() interface.
4556 *
4557 * The length is the length of the data needed, not the event length
4558 * which also includes the event header.
4559 *
4560 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
4561 * If NULL is returned, then nothing has been allocated or locked.
4562 */
4563 struct ring_buffer_event *
4564 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
4565 {
4566 struct ring_buffer_per_cpu *cpu_buffer;
4567 struct ring_buffer_event *event;
4568 int cpu;
4569
4570 /* If we are tracing schedule, we don't want to recurse */
4571 preempt_disable_notrace();
4572
4573 if (unlikely(atomic_read(&buffer->record_disabled)))
4574 goto out;
4575
4576 cpu = raw_smp_processor_id();
4577
4578 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
4579 goto out;
4580
4581 cpu_buffer = buffer->buffers[cpu];
4582
4583 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
4584 goto out;
4585
4586 if (unlikely(length > buffer->max_data_size))
4587 goto out;
4588
4589 if (unlikely(trace_recursive_lock(cpu_buffer)))
4590 goto out;
4591
4592 event = rb_reserve_next_event(buffer, cpu_buffer, length);
4593 if (!event)
4594 goto out_unlock;
4595
4596 return event;
4597
4598 out_unlock:
4599 trace_recursive_unlock(cpu_buffer);
4600 out:
4601 preempt_enable_notrace();
4602 return NULL;
4603 }
4604 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
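
/*
 * Typical usage of the reserve/commit pair (illustrative sketch;
 * struct my_entry is a made-up payload type):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	ring_buffer_unlock_commit(buffer);
 */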
4605
4606 /*
4607 * Decrement the entry count of the page that an event is on.
4608 * The event does not even need to exist, only the pointer
4609 * to the page it is on. This may only be called before the commit
4610 * takes place.
4611 */
4612 static inline void
4613 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
4614 struct ring_buffer_event *event)
4615 {
4616 unsigned long addr = (unsigned long)event;
4617 struct buffer_page *bpage = cpu_buffer->commit_page;
4618 struct buffer_page *start;
4619
4620 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
4621
4622 /* Do the likely case first */
4623 if (likely(bpage->page == (void *)addr)) {
4624 local_dec(&bpage->entries);
4625 return;
4626 }
4627
4628 /*
4629 * Because the commit page may be on the reader page we
4630 * start with the next page and check the end of the loop there.
4631 */
4632 rb_inc_page(&bpage);
4633 start = bpage;
4634 do {
4635 if (bpage->page == (void *)addr) {
4636 local_dec(&bpage->entries);
4637 return;
4638 }
4639 rb_inc_page(&bpage);
4640 } while (bpage != start);
4641
4642 /* commit not part of this buffer?? */
4643 RB_WARN_ON(cpu_buffer, 1);
4644 }
4645
4646 /**
4647 * ring_buffer_discard_commit - discard an event that has not been committed
4648 * @buffer: the ring buffer
4649 * @event: non committed event to discard
4650 *
4651 * Sometimes an event that is in the ring buffer needs to be ignored.
4652 * This function lets the user discard an event in the ring buffer
4653 * and then that event will not be read later.
4654 *
4655 * This function only works if it is called before the item has been
4656 * committed. It will try to free the event from the ring buffer
4657 * if another event has not been added behind it.
4658 *
4659 * If another event has been added behind it, it will set the event
4660 * up as discarded, and perform the commit.
4661 *
4662 * If this function is called, do not call ring_buffer_unlock_commit on
4663 * the event.
4664 */
4665 void ring_buffer_discard_commit(struct trace_buffer *buffer,
4666 struct ring_buffer_event *event)
4667 {
4668 struct ring_buffer_per_cpu *cpu_buffer;
4669 int cpu;
4670
4671 /* The event is discarded regardless */
4672 rb_event_discard(event);
4673
4674 cpu = smp_processor_id();
4675 cpu_buffer = buffer->buffers[cpu];
4676
4677 /*
4678 * This must only be called if the event has not been
4679 * committed yet. Thus we can assume that preemption
4680 * is still disabled.
4681 */
4682 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
4683
4684 rb_decrement_entry(cpu_buffer, event);
4685 if (rb_try_to_discard(cpu_buffer, event))
4686 goto out;
4687
4688 out:
4689 rb_end_commit(cpu_buffer);
4690
4691 trace_recursive_unlock(cpu_buffer);
4692
4693 preempt_enable_notrace();
4694
4695 }
4696 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
4697
4698 /**
4699 * ring_buffer_write - write data to the buffer without reserving
4700 * @buffer: The ring buffer to write to.
4701 * @length: The length of the data being written (excluding the event header)
4702 * @data: The data to write to the buffer.
4703 *
4704 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
4705 * one function. If you already have the data to write to the buffer, it
4706 * may be easier to simply call this function.
4707 *
4708 * Note, like ring_buffer_lock_reserve, the length is the length of the data
4709 * and not the length of the event which would hold the header.
4710 */
4711 int ring_buffer_write(struct trace_buffer *buffer,
4712 unsigned long length,
4713 void *data)
4714 {
4715 struct ring_buffer_per_cpu *cpu_buffer;
4716 struct ring_buffer_event *event;
4717 void *body;
4718 int ret = -EBUSY;
4719 int cpu;
4720
4721 preempt_disable_notrace();
4722
4723 if (atomic_read(&buffer->record_disabled))
4724 goto out;
4725
4726 cpu = raw_smp_processor_id();
4727
4728 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4729 goto out;
4730
4731 cpu_buffer = buffer->buffers[cpu];
4732
4733 if (atomic_read(&cpu_buffer->record_disabled))
4734 goto out;
4735
4736 if (length > buffer->max_data_size)
4737 goto out;
4738
4739 if (unlikely(trace_recursive_lock(cpu_buffer)))
4740 goto out;
4741
4742 event = rb_reserve_next_event(buffer, cpu_buffer, length);
4743 if (!event)
4744 goto out_unlock;
4745
4746 body = rb_event_data(event);
4747
4748 memcpy(body, data, length);
4749
4750 rb_commit(cpu_buffer);
4751
4752 rb_wakeups(buffer, cpu_buffer);
4753
4754 ret = 0;
4755
4756 out_unlock:
4757 trace_recursive_unlock(cpu_buffer);
4758
4759 out:
4760 preempt_enable_notrace();
4761
4762 return ret;
4763 }
4764 EXPORT_SYMBOL_GPL(ring_buffer_write);
4765
4766 /*
4767 * The total entries in the ring buffer is the running counter
4768 * of entries entered into the ring buffer, minus the sum of
4769 * the entries read from the ring buffer and the number of
4770 * entries that were overwritten.
4771 */
4772 static inline unsigned long
4773 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4774 {
4775 return local_read(&cpu_buffer->entries) -
4776 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4777 }
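
/*
 * Worked example (made-up numbers): with 1000 events written, 200
 * overwritten by the writer wrapping the buffer, and 300 already
 * consumed by a reader, rb_num_of_entries() returns
 * 1000 - (200 + 300) = 500 unconsumed events.
 */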
4778
4779 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
4780 {
4781 return !rb_num_of_entries(cpu_buffer);
4782 }
4783
4784 /**
4785 * ring_buffer_record_disable - stop all writes into the buffer
4786 * @buffer: The ring buffer to stop writes to.
4787 *
4788 * This prevents all writes to the buffer. Any attempt to write
4789 * to the buffer after this will fail and return NULL.
4790 *
4791 * The caller should call synchronize_rcu() after this.
4792 */
4793 void ring_buffer_record_disable(struct trace_buffer *buffer)
4794 {
4795 atomic_inc(&buffer->record_disabled);
4796 }
4797 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
4798
4799 /**
4800 * ring_buffer_record_enable - enable writes to the buffer
4801 * @buffer: The ring buffer to enable writes
4802 *
4803 * Note, multiple disables will need the same number of enables
4804 * to truly enable the writing (much like preempt_disable).
4805 */
4806 void ring_buffer_record_enable(struct trace_buffer *buffer)
4807 {
4808 atomic_dec(&buffer->record_disabled);
4809 }
4810 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4811
4812 /**
4813 * ring_buffer_record_off - stop all writes into the buffer
4814 * @buffer: The ring buffer to stop writes to.
4815 *
4816 * This prevents all writes to the buffer. Any attempt to write
4817 * to the buffer after this will fail and return NULL.
4818 *
4819 * This is different than ring_buffer_record_disable() as
4820 * it works like an on/off switch, whereas the disable() version
4821 * must be paired with an enable().
4822 */
4823 void ring_buffer_record_off(struct trace_buffer *buffer)
4824 {
4825 unsigned int rd;
4826 unsigned int new_rd;
4827
4828 rd = atomic_read(&buffer->record_disabled);
4829 do {
4830 new_rd = rd | RB_BUFFER_OFF;
4831 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4832 }
4833 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
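
/*
 * Illustrative sketch of the record_disabled layout (assuming
 * RB_BUFFER_OFF is a single flag bit above the disable counter, as
 * the mask test in ring_buffer_record_is_set_on() implies):
 *
 *	ring_buffer_record_disable()	counter 0 -> 1	(writes fail)
 *	ring_buffer_record_off()	OFF bit set	(writes fail)
 *	ring_buffer_record_enable()	counter 1 -> 0	(still off)
 *	ring_buffer_record_on()		OFF bit cleared	(writes allowed)
 */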
4834
4835 /**
4836 * ring_buffer_record_on - restart writes into the buffer
4837 * @buffer: The ring buffer to start writes to.
4838 *
4839 * This enables all writes to the buffer that was disabled by
4840 * ring_buffer_record_off().
4841 *
4842 * This is different than ring_buffer_record_enable() as
4843 * it works like an on/off switch, whereas the enable() version
4844 * must be paired with a disable().
4845 */
4846 void ring_buffer_record_on(struct trace_buffer *buffer)
4847 {
4848 unsigned int rd;
4849 unsigned int new_rd;
4850
4851 rd = atomic_read(&buffer->record_disabled);
4852 do {
4853 new_rd = rd & ~RB_BUFFER_OFF;
4854 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4855 }
4856 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4857
4858 /**
4859 * ring_buffer_record_is_on - return true if the ring buffer can write
4860 * @buffer: The ring buffer to see if write is enabled
4861 *
4862 * Returns true if the ring buffer is in a state that it accepts writes.
4863 */
4864 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4865 {
4866 return !atomic_read(&buffer->record_disabled);
4867 }
4868
4869 /**
4870 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4871 * @buffer: The ring buffer to see if write is set enabled
4872 *
4873 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4874 * Note that this does NOT mean it is in a writable state.
4875 *
4876 * It may return true when the ring buffer has been disabled by
4877 * ring_buffer_record_disable(), as that is a temporary disabling of
4878 * the ring buffer.
4879 */
4880 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4881 {
4882 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4883 }
4884
4885 /**
4886 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4887 * @buffer: The ring buffer to stop writes to.
4888 * @cpu: The CPU buffer to stop
4889 *
4890 * This prevents all writes to the buffer. Any attempt to write
4891 * to the buffer after this will fail and return NULL.
4892 *
4893 * The caller should call synchronize_rcu() after this.
4894 */
4895 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4896 {
4897 struct ring_buffer_per_cpu *cpu_buffer;
4898
4899 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4900 return;
4901
4902 cpu_buffer = buffer->buffers[cpu];
4903 atomic_inc(&cpu_buffer->record_disabled);
4904 }
4905 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4906
4907 /**
4908 * ring_buffer_record_enable_cpu - enable writes to the buffer
4909 * @buffer: The ring buffer to enable writes
4910 * @cpu: The CPU to enable.
4911 *
4912 * Note, multiple disables will need the same number of enables
4913 * to truly enable the writing (much like preempt_disable).
4914 */
4915 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4916 {
4917 struct ring_buffer_per_cpu *cpu_buffer;
4918
4919 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4920 return;
4921
4922 cpu_buffer = buffer->buffers[cpu];
4923 atomic_dec(&cpu_buffer->record_disabled);
4924 }
4925 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4926
4927 /**
4928 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4929 * @buffer: The ring buffer
4930 * @cpu: The per CPU buffer to read from.
4931 */
4932 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4933 {
4934 unsigned long flags;
4935 struct ring_buffer_per_cpu *cpu_buffer;
4936 struct buffer_page *bpage;
4937 u64 ret = 0;
4938
4939 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4940 return 0;
4941
4942 cpu_buffer = buffer->buffers[cpu];
4943 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4944 /*
4945 * If the tail is on the reader_page, the oldest time stamp is on
4946 * the reader page.
4947 */
4948 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4949 bpage = cpu_buffer->reader_page;
4950 else
4951 bpage = rb_set_head_page(cpu_buffer);
4952 if (bpage)
4953 ret = bpage->page->time_stamp;
4954 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4955
4956 return ret;
4957 }
4958 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4959
4960 /**
4961 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4962 * @buffer: The ring buffer
4963 * @cpu: The per CPU buffer to read from.
4964 */
4965 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4966 {
4967 struct ring_buffer_per_cpu *cpu_buffer;
4968 unsigned long ret;
4969
4970 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4971 return 0;
4972
4973 cpu_buffer = buffer->buffers[cpu];
4974 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4975
4976 return ret;
4977 }
4978 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4979
4980 /**
4981 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4982 * @buffer: The ring buffer
4983 * @cpu: The per CPU buffer to get the entries from.
4984 */
4985 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4986 {
4987 struct ring_buffer_per_cpu *cpu_buffer;
4988
4989 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4990 return 0;
4991
4992 cpu_buffer = buffer->buffers[cpu];
4993
4994 return rb_num_of_entries(cpu_buffer);
4995 }
4996 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4997
4998 /**
4999 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
5000 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
5001 * @buffer: The ring buffer
5002 * @cpu: The per CPU buffer to get the number of overruns from
5003 */
5004 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
5005 {
5006 struct ring_buffer_per_cpu *cpu_buffer;
5007 unsigned long ret;
5008
5009 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5010 return 0;
5011
5012 cpu_buffer = buffer->buffers[cpu];
5013 ret = local_read(&cpu_buffer->overrun);
5014
5015 return ret;
5016 }
5017 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
5018
5019 /**
5020 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
5021 * commits failing due to the buffer wrapping around while there are uncommitted
5022 * events, such as during an interrupt storm.
5023 * @buffer: The ring buffer
5024 * @cpu: The per CPU buffer to get the number of overruns from
5025 */
5026 unsigned long
5027 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
5028 {
5029 struct ring_buffer_per_cpu *cpu_buffer;
5030 unsigned long ret;
5031
5032 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5033 return 0;
5034
5035 cpu_buffer = buffer->buffers[cpu];
5036 ret = local_read(&cpu_buffer->commit_overrun);
5037
5038 return ret;
5039 }
5040 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
5041
5042 /**
5043 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
5044 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
5045 * @buffer: The ring buffer
5046 * @cpu: The per CPU buffer to get the number of overruns from
5047 */
5048 unsigned long
5049 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
5050 {
5051 struct ring_buffer_per_cpu *cpu_buffer;
5052 unsigned long ret;
5053
5054 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5055 return 0;
5056
5057 cpu_buffer = buffer->buffers[cpu];
5058 ret = local_read(&cpu_buffer->dropped_events);
5059
5060 return ret;
5061 }
5062 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
5063
5064 /**
5065 * ring_buffer_read_events_cpu - get the number of events successfully read
5066 * @buffer: The ring buffer
5067 * @cpu: The per CPU buffer to get the number of events read
5068 */
5069 unsigned long
5070 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
5071 {
5072 struct ring_buffer_per_cpu *cpu_buffer;
5073
5074 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5075 return 0;
5076
5077 cpu_buffer = buffer->buffers[cpu];
5078 return cpu_buffer->read;
5079 }
5080 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
5081
5082 /**
5083 * ring_buffer_entries - get the number of entries in a buffer
5084 * @buffer: The ring buffer
5085 *
5086 * Returns the total number of entries in the ring buffer
5087 * (all CPU entries)
5088 */
5089 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
5090 {
5091 struct ring_buffer_per_cpu *cpu_buffer;
5092 unsigned long entries = 0;
5093 int cpu;
5094
5095 /* if you care about this being correct, lock the buffer */
5096 for_each_buffer_cpu(buffer, cpu) {
5097 cpu_buffer = buffer->buffers[cpu];
5098 entries += rb_num_of_entries(cpu_buffer);
5099 }
5100
5101 return entries;
5102 }
5103 EXPORT_SYMBOL_GPL(ring_buffer_entries);
5104
5105 /**
5106 * ring_buffer_overruns - get the number of overruns in buffer
5107 * @buffer: The ring buffer
5108 *
5109 * Returns the total number of overruns in the ring buffer
5110 * (all CPU entries)
5111 */
5112 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
5113 {
5114 struct ring_buffer_per_cpu *cpu_buffer;
5115 unsigned long overruns = 0;
5116 int cpu;
5117
5118 /* if you care about this being correct, lock the buffer */
5119 for_each_buffer_cpu(buffer, cpu) {
5120 cpu_buffer = buffer->buffers[cpu];
5121 overruns += local_read(&cpu_buffer->overrun);
5122 }
5123
5124 return overruns;
5125 }
5126 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
5127
5128 static void rb_iter_reset(struct ring_buffer_iter *iter)
5129 {
5130 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5131
5132 /* Iterator usage is expected to have record disabled */
5133 iter->head_page = cpu_buffer->reader_page;
5134 iter->head = cpu_buffer->reader_page->read;
5135 iter->next_event = iter->head;
5136
5137 iter->cache_reader_page = iter->head_page;
5138 iter->cache_read = cpu_buffer->read;
5139 iter->cache_pages_removed = cpu_buffer->pages_removed;
5140
5141 if (iter->head) {
5142 iter->read_stamp = cpu_buffer->read_stamp;
5143 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
5144 } else {
5145 iter->read_stamp = iter->head_page->page->time_stamp;
5146 iter->page_stamp = iter->read_stamp;
5147 }
5148 }
5149
5150 /**
5151 * ring_buffer_iter_reset - reset an iterator
5152 * @iter: The iterator to reset
5153 *
5154 * Resets the iterator, so that it will start from the beginning
5155 * again.
5156 */
5157 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
5158 {
5159 struct ring_buffer_per_cpu *cpu_buffer;
5160 unsigned long flags;
5161
5162 if (!iter)
5163 return;
5164
5165 cpu_buffer = iter->cpu_buffer;
5166
5167 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5168 rb_iter_reset(iter);
5169 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5170 }
5171 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
5172
5173 /**
5174 * ring_buffer_iter_empty - check if an iterator has no more to read
5175 * @iter: The iterator to check
5176 */
5177 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
5178 {
5179 struct ring_buffer_per_cpu *cpu_buffer;
5180 struct buffer_page *reader;
5181 struct buffer_page *head_page;
5182 struct buffer_page *commit_page;
5183 struct buffer_page *curr_commit_page;
5184 unsigned commit;
5185 u64 curr_commit_ts;
5186 u64 commit_ts;
5187
5188 cpu_buffer = iter->cpu_buffer;
5189 reader = cpu_buffer->reader_page;
5190 head_page = cpu_buffer->head_page;
5191 commit_page = READ_ONCE(cpu_buffer->commit_page);
5192 commit_ts = commit_page->page->time_stamp;
5193
5194 /*
5195 * When the writer goes across pages, it issues a cmpxchg which
5196 * is a mb(), which will synchronize with the rmb here.
5197 * (see rb_tail_page_update())
5198 */
5199 smp_rmb();
5200 commit = rb_page_commit(commit_page);
5201 /* We want to make sure that the commit page doesn't change */
5202 smp_rmb();
5203
5204 /* Make sure commit page didn't change */
5205 curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
5206 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
5207
5208 /* If the commit page changed, then there's more data */
5209 if (curr_commit_page != commit_page ||
5210 curr_commit_ts != commit_ts)
5211 return 0;
5212
5213 /* Still racy, as it may return a false positive, but that's OK */
5214 return ((iter->head_page == commit_page && iter->head >= commit) ||
5215 (iter->head_page == reader && commit_page == head_page &&
5216 head_page->read == commit &&
5217 iter->head == rb_page_size(cpu_buffer->reader_page)));
5218 }
5219 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
5220
5221 static void
5222 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
5223 struct ring_buffer_event *event)
5224 {
5225 u64 delta;
5226
5227 switch (event->type_len) {
5228 case RINGBUF_TYPE_PADDING:
5229 return;
5230
5231 case RINGBUF_TYPE_TIME_EXTEND:
5232 delta = rb_event_time_stamp(event);
5233 cpu_buffer->read_stamp += delta;
5234 return;
5235
5236 case RINGBUF_TYPE_TIME_STAMP:
5237 delta = rb_event_time_stamp(event);
5238 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
5239 cpu_buffer->read_stamp = delta;
5240 return;
5241
5242 case RINGBUF_TYPE_DATA:
5243 cpu_buffer->read_stamp += event->time_delta;
5244 return;
5245
5246 default:
5247 RB_WARN_ON(cpu_buffer, 1);
5248 }
5249 }
5250
5251 static void
5252 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
5253 struct ring_buffer_event *event)
5254 {
5255 u64 delta;
5256
5257 switch (event->type_len) {
5258 case RINGBUF_TYPE_PADDING:
5259 return;
5260
5261 case RINGBUF_TYPE_TIME_EXTEND:
5262 delta = rb_event_time_stamp(event);
5263 iter->read_stamp += delta;
5264 return;
5265
5266 case RINGBUF_TYPE_TIME_STAMP:
5267 delta = rb_event_time_stamp(event);
5268 delta = rb_fix_abs_ts(delta, iter->read_stamp);
5269 iter->read_stamp = delta;
5270 return;
5271
5272 case RINGBUF_TYPE_DATA:
5273 iter->read_stamp += event->time_delta;
5274 return;
5275
5276 default:
5277 RB_WARN_ON(iter->cpu_buffer, 1);
5278 }
5279 }
5280
5281 static struct buffer_page *
5282 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
5283 {
5284 struct buffer_page *reader = NULL;
5285 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
5286 unsigned long overwrite;
5287 unsigned long flags;
5288 int nr_loops = 0;
5289 bool ret;
5290
5291 local_irq_save(flags);
5292 arch_spin_lock(&cpu_buffer->lock);
5293
5294 again:
5295 /*
5296 * This should normally only loop twice. But because the
5297 * start of the reader inserts an empty page, it causes
5298 * a case where we will loop three times. There should be no
5299 * reason to loop four times (that I know of).
5300 */
5301 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
5302 reader = NULL;
5303 goto out;
5304 }
5305
5306 reader = cpu_buffer->reader_page;
5307
5308 /* If there's more to read, return this page */
5309 if (cpu_buffer->reader_page->read < rb_page_size(reader))
5310 goto out;
5311
5312 /* Never should we have an index greater than the size */
5313 if (RB_WARN_ON(cpu_buffer,
5314 cpu_buffer->reader_page->read > rb_page_size(reader)))
5315 goto out;
5316
5317 /* check if we caught up to the tail */
5318 reader = NULL;
5319 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
5320 goto out;
5321
5322 /* Don't bother swapping if the ring buffer is empty */
5323 if (rb_num_of_entries(cpu_buffer) == 0)
5324 goto out;
5325
5326 /*
5327 * Reset the reader page to size zero.
5328 */
5329 local_set(&cpu_buffer->reader_page->write, 0);
5330 local_set(&cpu_buffer->reader_page->entries, 0);
5331 local_set(&cpu_buffer->reader_page->page->commit, 0);
5332 cpu_buffer->reader_page->real_end = 0;
5333
5334 spin:
5335 /*
5336 * Splice the empty reader page into the list around the head.
5337 */
5338 reader = rb_set_head_page(cpu_buffer);
5339 if (!reader)
5340 goto out;
5341 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
5342 cpu_buffer->reader_page->list.prev = reader->list.prev;
5343
5344 /*
5345 * cpu_buffer->pages just needs to point to the buffer, it
5346 * has no specific buffer page to point to. Let's move it out
5347 * of our way so we don't accidentally swap it.
5348 */
5349 cpu_buffer->pages = reader->list.prev;
5350
5351 /* The reader page will be pointing to the new head */
5352 rb_set_list_to_head(&cpu_buffer->reader_page->list);
5353
5354 /*
5355 * We want to make sure we read the overruns after we set up our
5356 * pointers to the next object. The writer side does a
5357 * cmpxchg to cross pages which acts as the mb on the writer
5358 * side. Note, the reader will constantly fail the swap
5359 * while the writer is updating the pointers, so this
5360 * guarantees that the overwrite recorded here is the one we
5361 * want to compare with the last_overrun.
5362 */
5363 smp_mb();
5364 overwrite = local_read(&(cpu_buffer->overrun));
5365
5366 /*
5367 * Here's the tricky part.
5368 *
5369 * We need to move the pointer past the header page.
5370 * But we can only do that if a writer is not currently
5371 * moving it. The page before the header page has the
5372 * flag bit '1' set if it is pointing to the page we want,
5373 * but if the writer is in the process of moving it
5374 * then it will be '2', or '0' if already moved.
5375 */
5376
5377 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
5378
5379 /*
5380 * If we did not convert it, then we must try again.
5381 */
5382 if (!ret)
5383 goto spin;
5384
5385 if (cpu_buffer->ring_meta)
5386 rb_update_meta_reader(cpu_buffer, reader);
5387
5388 /*
5389 * Yay! We succeeded in replacing the page.
5390 *
5391 * Now make the new head point back to the reader page.
5392 */
5393 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
5394 rb_inc_page(&cpu_buffer->head_page);
5395
5396 cpu_buffer->cnt++;
5397 local_inc(&cpu_buffer->pages_read);
5398
5399 /* Finally update the reader page to the new head */
5400 cpu_buffer->reader_page = reader;
5401 cpu_buffer->reader_page->read = 0;
5402
5403 if (overwrite != cpu_buffer->last_overrun) {
5404 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
5405 cpu_buffer->last_overrun = overwrite;
5406 }
5407
5408 goto again;
5409
5410 out:
5411 /* Update the read_stamp on the first event */
5412 if (reader && reader->read == 0)
5413 cpu_buffer->read_stamp = reader->page->time_stamp;
5414
5415 arch_spin_unlock(&cpu_buffer->lock);
5416 local_irq_restore(flags);
5417
5418 /*
5419 * The writer has preemption disabled, so wait for it. But not forever:
5420 * 1 second is pretty much "forever" anyway.
5421 */
5422 #define USECS_WAIT 1000000
5423 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
5424 /* If the write is past the end of the page, a writer is still updating it */
5425 if (likely(!reader || rb_page_write(reader) <= bsize))
5426 break;
5427
5428 udelay(1);
5429
5430 /* Get the latest version of the reader write value */
5431 smp_rmb();
5432 }
5433
5434 /* The writer is not moving forward? Something is wrong */
5435 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
5436 reader = NULL;
5437
5438 /*
5439 * Make sure we see any padding after the write update
5440 * (see rb_reset_tail()).
5441 *
5442 * In addition, a writer may be writing on the reader page
5443 * if the page has not been fully filled, so the read barrier
5444 * is also needed to make sure we see the content of what is
5445 * committed by the writer (see rb_set_commit_to_write()).
5446 */
5447 smp_rmb();
5448
5450 return reader;
5451 }
5452
5453 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
5454 {
5455 struct ring_buffer_event *event;
5456 struct buffer_page *reader;
5457 unsigned length;
5458
5459 reader = rb_get_reader_page(cpu_buffer);
5460
5461 /* This function should not be called when buffer is empty */
5462 if (RB_WARN_ON(cpu_buffer, !reader))
5463 return;
5464
5465 event = rb_reader_event(cpu_buffer);
5466
5467 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
5468 cpu_buffer->read++;
5469
5470 rb_update_read_stamp(cpu_buffer, event);
5471
5472 length = rb_event_length(event);
5473 cpu_buffer->reader_page->read += length;
5474 cpu_buffer->read_bytes += length;
5475 }
5476
5477 static void rb_advance_iter(struct ring_buffer_iter *iter)
5478 {
5479 struct ring_buffer_per_cpu *cpu_buffer;
5480
5481 cpu_buffer = iter->cpu_buffer;
5482
5483 /* If head == next_event then we need to jump to the next event */
5484 if (iter->head == iter->next_event) {
5485 /* If the event gets overwritten again, there's nothing to do */
5486 if (rb_iter_head_event(iter) == NULL)
5487 return;
5488 }
5489
5490 iter->head = iter->next_event;
5491
5492 /*
5493 * Check if we are at the end of the buffer.
5494 */
5495 if (iter->next_event >= rb_page_size(iter->head_page)) {
5496 /* discarded commits can make the page empty */
5497 if (iter->head_page == cpu_buffer->commit_page)
5498 return;
5499 rb_inc_iter(iter);
5500 return;
5501 }
5502
5503 rb_update_iter_read_stamp(iter, iter->event);
5504 }
5505
5506 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
5507 {
5508 return cpu_buffer->lost_events;
5509 }
5510
5511 static struct ring_buffer_event *
5512 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
5513 unsigned long *lost_events)
5514 {
5515 struct ring_buffer_event *event;
5516 struct buffer_page *reader;
5517 int nr_loops = 0;
5518
5519 if (ts)
5520 *ts = 0;
5521 again:
5522 /*
5523 * We repeat when a time extend is encountered.
5524 * Since the time extend is always attached to a data event,
5525 * we should never loop more than once.
5526 * (We never hit the following condition more than twice).
5527 */
5528 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
5529 return NULL;
5530
5531 reader = rb_get_reader_page(cpu_buffer);
5532 if (!reader)
5533 return NULL;
5534
5535 event = rb_reader_event(cpu_buffer);
5536
5537 switch (event->type_len) {
5538 case RINGBUF_TYPE_PADDING:
5539 if (rb_null_event(event))
5540 RB_WARN_ON(cpu_buffer, 1);
5541 /*
5542 * Because the writer could be discarding every
5543 * event it creates (which would probably be bad),
5544 * if we were to go back to "again" then we may never
5545 * catch up, and will trigger the warn on, or lock
5546 * the box. Return the padding, and we will release
5547 * the current locks, and try again.
5548 */
5549 return event;
5550
5551 case RINGBUF_TYPE_TIME_EXTEND:
5552 /* Internal data, OK to advance */
5553 rb_advance_reader(cpu_buffer);
5554 goto again;
5555
5556 case RINGBUF_TYPE_TIME_STAMP:
5557 if (ts) {
5558 *ts = rb_event_time_stamp(event);
5559 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
5560 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5561 cpu_buffer->cpu, ts);
5562 }
5563 /* Internal data, OK to advance */
5564 rb_advance_reader(cpu_buffer);
5565 goto again;
5566
5567 case RINGBUF_TYPE_DATA:
5568 if (ts && !(*ts)) {
5569 *ts = cpu_buffer->read_stamp + event->time_delta;
5570 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5571 cpu_buffer->cpu, ts);
5572 }
5573 if (lost_events)
5574 *lost_events = rb_lost_events(cpu_buffer);
5575 return event;
5576
5577 default:
5578 RB_WARN_ON(cpu_buffer, 1);
5579 }
5580
5581 return NULL;
5582 }
5583 EXPORT_SYMBOL_GPL(ring_buffer_peek);
5584
5585 static struct ring_buffer_event *
5586 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5587 {
5588 struct trace_buffer *buffer;
5589 struct ring_buffer_per_cpu *cpu_buffer;
5590 struct ring_buffer_event *event;
5591 int nr_loops = 0;
5592
5593 if (ts)
5594 *ts = 0;
5595
5596 cpu_buffer = iter->cpu_buffer;
5597 buffer = cpu_buffer->buffer;
5598
5599 /*
5600 * Check if someone performed a consuming read of the buffer
5601 * or removed some pages from the buffer. In these cases, the
5602 * iterator was invalidated and we need to reset it.
5603 */
5604 if (unlikely(iter->cache_read != cpu_buffer->read ||
5605 iter->cache_reader_page != cpu_buffer->reader_page ||
5606 iter->cache_pages_removed != cpu_buffer->pages_removed))
5607 rb_iter_reset(iter);
5608
5609 again:
5610 if (ring_buffer_iter_empty(iter))
5611 return NULL;
5612
5613 /*
5614 * As the writer can mess with what the iterator is trying
5615 * to read, just give up if we fail to get an event after
5616 * three tries. The iterator is not as reliable when reading
5617 * the ring buffer with an active write as the consumer is.
5618 * Do not warn if three failures are reached.
5619 */
5620 if (++nr_loops > 3)
5621 return NULL;
5622
5623 if (rb_per_cpu_empty(cpu_buffer))
5624 return NULL;
5625
5626 if (iter->head >= rb_page_size(iter->head_page)) {
5627 rb_inc_iter(iter);
5628 goto again;
5629 }
5630
5631 event = rb_iter_head_event(iter);
5632 if (!event)
5633 goto again;
5634
5635 switch (event->type_len) {
5636 case RINGBUF_TYPE_PADDING:
5637 if (rb_null_event(event)) {
5638 rb_inc_iter(iter);
5639 goto again;
5640 }
5641 rb_advance_iter(iter);
5642 return event;
5643
5644 case RINGBUF_TYPE_TIME_EXTEND:
5645 /* Internal data, OK to advance */
5646 rb_advance_iter(iter);
5647 goto again;
5648
5649 case RINGBUF_TYPE_TIME_STAMP:
5650 if (ts) {
5651 *ts = rb_event_time_stamp(event);
5652 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
5653 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5654 cpu_buffer->cpu, ts);
5655 }
5656 /* Internal data, OK to advance */
5657 rb_advance_iter(iter);
5658 goto again;
5659
5660 case RINGBUF_TYPE_DATA:
5661 if (ts && !(*ts)) {
5662 *ts = iter->read_stamp + event->time_delta;
5663 ring_buffer_normalize_time_stamp(buffer,
5664 cpu_buffer->cpu, ts);
5665 }
5666 return event;
5667
5668 default:
5669 RB_WARN_ON(cpu_buffer, 1);
5670 }
5671
5672 return NULL;
5673 }
5674 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
5675
5676 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
5677 {
5678 if (likely(!in_nmi())) {
5679 raw_spin_lock(&cpu_buffer->reader_lock);
5680 return true;
5681 }
5682
5683 /*
5684 * If an NMI die dump prints out the content of the ring buffer,
5685 * trylock must be used to prevent a deadlock if the NMI
5686 * preempted a task that holds the ring buffer locks. If
5687 * we get the lock then all is fine, if not, then continue
5688 * to do the read, but this can corrupt the ring buffer,
5689 * so it must be permanently disabled from future writes.
5690 * Reading from an NMI is a one-shot deal.
5691 */
5692 if (raw_spin_trylock(&cpu_buffer->reader_lock))
5693 return true;
5694
5695 /* Continue without locking, but disable the ring buffer */
5696 atomic_inc(&cpu_buffer->record_disabled);
5697 return false;
5698 }
5699
5700 static inline void
5701 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
5702 {
5703 if (likely(locked))
5704 raw_spin_unlock(&cpu_buffer->reader_lock);
5705 }
5706
5707 /**
5708 * ring_buffer_peek - peek at the next event to be read
5709 * @buffer: The ring buffer to read
5710 * @cpu: The CPU to peek at
5711 * @ts: The timestamp counter of this event.
5712 * @lost_events: a variable to store if events were lost (may be NULL)
5713 *
5714 * This will return the event that will be read next, but does
5715 * not consume the data.
5716 */
5717 struct ring_buffer_event *
5718 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
5719 unsigned long *lost_events)
5720 {
5721 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5722 struct ring_buffer_event *event;
5723 unsigned long flags;
5724 bool dolock;
5725
5726 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5727 return NULL;
5728
5729 again:
5730 local_irq_save(flags);
5731 dolock = rb_reader_lock(cpu_buffer);
5732 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5733 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5734 rb_advance_reader(cpu_buffer);
5735 rb_reader_unlock(cpu_buffer, dolock);
5736 local_irq_restore(flags);
5737
5738 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5739 goto again;
5740
5741 return event;
5742 }
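
/*
 * Illustrative sketch (editor's addition, not part of the kernel
 * source): a minimal use of ring_buffer_peek(). The event is not
 * consumed, so repeated calls return the same event until a consuming
 * read advances the reader. "buffer" and "cpu" are assumed to be a
 * valid trace_buffer and CPU owned by the caller.
 *
 *	u64 ts;
 *	unsigned long lost = 0;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next: %u bytes at %llu, %lu lost\n",
 *			ring_buffer_event_length(event), ts, lost);
 */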
5743
5744 /**
 * ring_buffer_iter_dropped - report if there are dropped events
5745 * @iter: The ring buffer iterator
5746 *
5747 * Returns true if there were dropped events since the last peek.
5748 */
5749 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5750 {
5751 bool ret = iter->missed_events != 0;
5752
5753 iter->missed_events = 0;
5754 return ret;
5755 }
5756 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5757
5758 /**
5759 * ring_buffer_iter_peek - peek at the next event to be read
5760 * @iter: The ring buffer iterator
5761 * @ts: The timestamp counter of this event.
5762 *
5763 * This will return the event that will be read next, but does
5764 * not increment the iterator.
5765 */
5766 struct ring_buffer_event *
5767 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5768 {
5769 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5770 struct ring_buffer_event *event;
5771 unsigned long flags;
5772
5773 again:
5774 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5775 event = rb_iter_peek(iter, ts);
5776 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5777
5778 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5779 goto again;
5780
5781 return event;
5782 }
5783
5784 /**
5785 * ring_buffer_consume - return an event and consume it
5786 * @buffer: The ring buffer to get the next event from
5787 * @cpu: the cpu to read the buffer from
5788 * @ts: a variable to store the timestamp (may be NULL)
5789 * @lost_events: a variable to store if events were lost (may be NULL)
5790 *
5791 * Returns the next event in the ring buffer, and that event is consumed.
5792 * Meaning, that sequential reads will keep returning a different event,
5793 * and eventually empty the ring buffer if the producer is slower.
5794 */
5795 struct ring_buffer_event *
5796 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5797 unsigned long *lost_events)
5798 {
5799 struct ring_buffer_per_cpu *cpu_buffer;
5800 struct ring_buffer_event *event = NULL;
5801 unsigned long flags;
5802 bool dolock;
5803
5804 again:
5805 /* might be called in atomic context */
5806 preempt_disable();
5807
5808 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5809 goto out;
5810
5811 cpu_buffer = buffer->buffers[cpu];
5812 local_irq_save(flags);
5813 dolock = rb_reader_lock(cpu_buffer);
5814
5815 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5816 if (event) {
5817 cpu_buffer->lost_events = 0;
5818 rb_advance_reader(cpu_buffer);
5819 }
5820
5821 rb_reader_unlock(cpu_buffer, dolock);
5822 local_irq_restore(flags);
5823
5824 out:
5825 preempt_enable();
5826
5827 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5828 goto again;
5829
5830 return event;
5831 }
5832 EXPORT_SYMBOL_GPL(ring_buffer_consume);
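
/*
 * Illustrative sketch (editor's addition): draining one CPU buffer
 * with consuming reads, as documented above. process_event() is a
 * hypothetical caller-supplied handler; the rest is this file's API.
 *
 *	u64 ts;
 *	unsigned long lost = 0;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_warn("lost %lu events\n", lost);
 *		process_event(ring_buffer_event_data(event), ts);
 *	}
 */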
5833
5834 /**
5835 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5836 * @buffer: The ring buffer to read from
5837 * @cpu: The cpu buffer to iterate over
5838 * @flags: gfp flags to use for memory allocation
5839 *
5840 * This performs the initial preparations necessary to iterate
5841 * through the buffer. Memory is allocated, buffer resizing
5842 * is disabled, and the iterator pointer is returned to the caller.
5843 *
5844 * After a sequence of ring_buffer_read_prepare calls, the user is
5845 * expected to make at least one call to ring_buffer_read_prepare_sync.
5846 * Afterwards, ring_buffer_read_start is invoked to get things going
5847 * for real.
5848 *
5849 * This overall must be paired with ring_buffer_read_finish.
5850 */
5851 struct ring_buffer_iter *
5852 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5853 {
5854 struct ring_buffer_per_cpu *cpu_buffer;
5855 struct ring_buffer_iter *iter;
5856
5857 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5858 return NULL;
5859
5860 iter = kzalloc(sizeof(*iter), flags);
5861 if (!iter)
5862 return NULL;
5863
5864 /* Holds the entire event: data and meta data */
5865 iter->event_size = buffer->subbuf_size;
5866 iter->event = kmalloc(iter->event_size, flags);
5867 if (!iter->event) {
5868 kfree(iter);
5869 return NULL;
5870 }
5871
5872 cpu_buffer = buffer->buffers[cpu];
5873
5874 iter->cpu_buffer = cpu_buffer;
5875
5876 atomic_inc(&cpu_buffer->resize_disabled);
5877
5878 return iter;
5879 }
5880 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5881
5882 /**
5883 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5884 *
5885 * All previously invoked ring_buffer_read_prepare calls to prepare
5886 * iterators will be synchronized. Afterwards, ring_buffer_read_start
5887 * calls on those iterators are allowed.
5888 */
5889 void
5890 ring_buffer_read_prepare_sync(void)
5891 {
5892 synchronize_rcu();
5893 }
5894 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5895
5896 /**
5897 * ring_buffer_read_start - start a non consuming read of the buffer
5898 * @iter: The iterator returned by ring_buffer_read_prepare
5899 *
5900 * This finalizes the startup of an iteration through the buffer.
5901 * The iterator comes from a call to ring_buffer_read_prepare and
5902 * an intervening ring_buffer_read_prepare_sync must have been
5903 * performed.
5904 *
5905 * Must be paired with ring_buffer_read_finish.
5906 */
5907 void
5908 ring_buffer_read_start(struct ring_buffer_iter *iter)
5909 {
5910 struct ring_buffer_per_cpu *cpu_buffer;
5911 unsigned long flags;
5912
5913 if (!iter)
5914 return;
5915
5916 cpu_buffer = iter->cpu_buffer;
5917
5918 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5919 arch_spin_lock(&cpu_buffer->lock);
5920 rb_iter_reset(iter);
5921 arch_spin_unlock(&cpu_buffer->lock);
5922 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5923 }
5924 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5925
5926 /**
5927 * ring_buffer_read_finish - finish reading the iterator of the buffer
5928 * @iter: The iterator retrieved by ring_buffer_read_prepare
5929 *
5930 * This re-enables resizing of the buffer, and frees the iterator.
5931 */
5932 void
5933 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5934 {
5935 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5936
5937 /* Use this opportunity to check the integrity of the ring buffer. */
5938 rb_check_pages(cpu_buffer);
5939
5940 atomic_dec(&cpu_buffer->resize_disabled);
5941 kfree(iter->event);
5942 kfree(iter);
5943 }
5944 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
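
/*
 * Illustrative sketch (editor's addition): a complete non-consuming
 * read session pairing the prepare/sync/start/finish calls documented
 * above. Writers may stay active; only resizing is blocked.
 * process_event() is a hypothetical handler.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(ring_buffer_event_data(event), ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */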
5945
5946 /**
5947 * ring_buffer_iter_advance - advance the iterator to the next location
5948 * @iter: The ring buffer iterator
5949 *
5950 * Move the location of the iterator such that the next read will
5951 * be the next location of the iterator.
5952 */
5953 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5954 {
5955 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5956 unsigned long flags;
5957
5958 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5959
5960 rb_advance_iter(iter);
5961
5962 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5963 }
5964 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5965
5966 /**
5967 * ring_buffer_size - return the size of the ring buffer (in bytes)
5968 * @buffer: The ring buffer.
5969 * @cpu: The CPU to get ring buffer size from.
5970 */
5971 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5972 {
5973 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5974 return 0;
5975
5976 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5977 }
5978 EXPORT_SYMBOL_GPL(ring_buffer_size);
5979
5980 /**
5981 * ring_buffer_max_event_size - return the max data size of an event
5982 * @buffer: The ring buffer.
5983 *
5984 * Returns the maximum size an event can be.
5985 */
5986 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5987 {
5988 /* If abs timestamp is requested, events have a timestamp too */
5989 if (ring_buffer_time_stamp_abs(buffer))
5990 return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5991 return buffer->max_data_size;
5992 }
5993 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
5994
5995 static void rb_clear_buffer_page(struct buffer_page *page)
5996 {
5997 local_set(&page->write, 0);
5998 local_set(&page->entries, 0);
5999 rb_init_page(page->page);
6000 page->read = 0;
6001 }
6002
6003 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6004 {
6005 struct trace_buffer_meta *meta = cpu_buffer->meta_page;
6006
6007 if (!meta)
6008 return;
6009
6010 meta->reader.read = cpu_buffer->reader_page->read;
6011 meta->reader.id = cpu_buffer->reader_page->id;
6012 meta->reader.lost_events = cpu_buffer->lost_events;
6013
6014 meta->entries = local_read(&cpu_buffer->entries);
6015 meta->overrun = local_read(&cpu_buffer->overrun);
6016 meta->read = cpu_buffer->read;
6017
6018 /* Some archs do not have data cache coherency between kernel and user-space */
6019 flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
6020 }
6021
6022 static void
6023 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
6024 {
6025 struct buffer_page *page;
6026
6027 rb_head_page_deactivate(cpu_buffer);
6028
6029 cpu_buffer->head_page
6030 = list_entry(cpu_buffer->pages, struct buffer_page, list);
6031 rb_clear_buffer_page(cpu_buffer->head_page);
6032 list_for_each_entry(page, cpu_buffer->pages, list) {
6033 rb_clear_buffer_page(page);
6034 }
6035
6036 cpu_buffer->tail_page = cpu_buffer->head_page;
6037 cpu_buffer->commit_page = cpu_buffer->head_page;
6038
6039 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
6040 INIT_LIST_HEAD(&cpu_buffer->new_pages);
6041 rb_clear_buffer_page(cpu_buffer->reader_page);
6042
6043 local_set(&cpu_buffer->entries_bytes, 0);
6044 local_set(&cpu_buffer->overrun, 0);
6045 local_set(&cpu_buffer->commit_overrun, 0);
6046 local_set(&cpu_buffer->dropped_events, 0);
6047 local_set(&cpu_buffer->entries, 0);
6048 local_set(&cpu_buffer->committing, 0);
6049 local_set(&cpu_buffer->commits, 0);
6050 local_set(&cpu_buffer->pages_touched, 0);
6051 local_set(&cpu_buffer->pages_lost, 0);
6052 local_set(&cpu_buffer->pages_read, 0);
6053 cpu_buffer->last_pages_touch = 0;
6054 cpu_buffer->shortest_full = 0;
6055 cpu_buffer->read = 0;
6056 cpu_buffer->read_bytes = 0;
6057
6058 rb_time_set(&cpu_buffer->write_stamp, 0);
6059 rb_time_set(&cpu_buffer->before_stamp, 0);
6060
6061 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
6062
6063 cpu_buffer->lost_events = 0;
6064 cpu_buffer->last_overrun = 0;
6065
6066 rb_head_page_activate(cpu_buffer);
6067 cpu_buffer->pages_removed = 0;
6068
6069 if (cpu_buffer->mapped) {
6070 rb_update_meta_page(cpu_buffer);
6071 if (cpu_buffer->ring_meta) {
6072 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
6073 meta->commit_buffer = meta->head_buffer;
6074 }
6075 }
6076 }
6077
6078 /* Must have disabled the cpu buffer then done a synchronize_rcu */
6079 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6080 {
6081 unsigned long flags;
6082
6083 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6084
6085 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
6086 goto out;
6087
6088 arch_spin_lock(&cpu_buffer->lock);
6089
6090 rb_reset_cpu(cpu_buffer);
6091
6092 arch_spin_unlock(&cpu_buffer->lock);
6093
6094 out:
6095 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6096 }
6097
6098 /**
6099 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
6100 * @buffer: The ring buffer to reset a per cpu buffer of
6101 * @cpu: The CPU buffer to be reset
6102 */
6103 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
6104 {
6105 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
6106
6107 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6108 return;
6109
6110 /* prevent another thread from changing buffer sizes */
6111 mutex_lock(&buffer->mutex);
6112
6113 atomic_inc(&cpu_buffer->resize_disabled);
6114 atomic_inc(&cpu_buffer->record_disabled);
6115
6116 /* Make sure all commits have finished */
6117 synchronize_rcu();
6118
6119 reset_disabled_cpu_buffer(cpu_buffer);
6120
6121 atomic_dec(&cpu_buffer->record_disabled);
6122 atomic_dec(&cpu_buffer->resize_disabled);
6123
6124 mutex_unlock(&buffer->mutex);
6125 }
6126 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
6127
6128 /* Flag to ensure proper resetting of atomic variables */
6129 #define RESET_BIT (1 << 30)
6130
6131 /**
6132 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
6133 * @buffer: The ring buffer to reset a per cpu buffer of
6134 */
6135 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
6136 {
6137 struct ring_buffer_per_cpu *cpu_buffer;
6138 int cpu;
6139
6140 /* prevent another thread from changing buffer sizes */
6141 mutex_lock(&buffer->mutex);
6142
6143 for_each_online_buffer_cpu(buffer, cpu) {
6144 cpu_buffer = buffer->buffers[cpu];
6145
6146 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
6147 atomic_inc(&cpu_buffer->record_disabled);
6148 }
6149
6150 /* Make sure all commits have finished */
6151 synchronize_rcu();
6152
6153 for_each_buffer_cpu(buffer, cpu) {
6154 cpu_buffer = buffer->buffers[cpu];
6155
6156 /*
6157 * If a CPU came online during the synchronize_rcu(), then
6158 * ignore it.
6159 */
6160 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
6161 continue;
6162
6163 reset_disabled_cpu_buffer(cpu_buffer);
6164
6165 atomic_dec(&cpu_buffer->record_disabled);
6166 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
6167 }
6168
6169 mutex_unlock(&buffer->mutex);
6170 }
6171
6172 /**
6173 * ring_buffer_reset - reset a ring buffer
6174 * @buffer: The ring buffer to reset all cpu buffers
6175 */
6176 void ring_buffer_reset(struct trace_buffer *buffer)
6177 {
6178 struct ring_buffer_per_cpu *cpu_buffer;
6179 int cpu;
6180
6181 /* prevent another thread from changing buffer sizes */
6182 mutex_lock(&buffer->mutex);
6183
6184 for_each_buffer_cpu(buffer, cpu) {
6185 cpu_buffer = buffer->buffers[cpu];
6186
6187 atomic_inc(&cpu_buffer->resize_disabled);
6188 atomic_inc(&cpu_buffer->record_disabled);
6189 }
6190
6191 /* Make sure all commits have finished */
6192 synchronize_rcu();
6193
6194 for_each_buffer_cpu(buffer, cpu) {
6195 cpu_buffer = buffer->buffers[cpu];
6196
6197 reset_disabled_cpu_buffer(cpu_buffer);
6198
6199 atomic_dec(&cpu_buffer->record_disabled);
6200 atomic_dec(&cpu_buffer->resize_disabled);
6201 }
6202
6203 mutex_unlock(&buffer->mutex);
6204 }
6205 EXPORT_SYMBOL_GPL(ring_buffer_reset);
6206
6207 /**
6208 * ring_buffer_empty - is the ring buffer empty?
6209 * @buffer: The ring buffer to test
6210 */
6211 bool ring_buffer_empty(struct trace_buffer *buffer)
6212 {
6213 struct ring_buffer_per_cpu *cpu_buffer;
6214 unsigned long flags;
6215 bool dolock;
6216 bool ret;
6217 int cpu;
6218
6219 /* yes this is racy, but if you don't like the race, lock the buffer */
6220 for_each_buffer_cpu(buffer, cpu) {
6221 cpu_buffer = buffer->buffers[cpu];
6222 local_irq_save(flags);
6223 dolock = rb_reader_lock(cpu_buffer);
6224 ret = rb_per_cpu_empty(cpu_buffer);
6225 rb_reader_unlock(cpu_buffer, dolock);
6226 local_irq_restore(flags);
6227
6228 if (!ret)
6229 return false;
6230 }
6231
6232 return true;
6233 }
6234 EXPORT_SYMBOL_GPL(ring_buffer_empty);
6235
6236 /**
6237 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
6238 * @buffer: The ring buffer
6239 * @cpu: The CPU buffer to test
6240 */
6241 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
6242 {
6243 struct ring_buffer_per_cpu *cpu_buffer;
6244 unsigned long flags;
6245 bool dolock;
6246 bool ret;
6247
6248 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6249 return true;
6250
6251 cpu_buffer = buffer->buffers[cpu];
6252 local_irq_save(flags);
6253 dolock = rb_reader_lock(cpu_buffer);
6254 ret = rb_per_cpu_empty(cpu_buffer);
6255 rb_reader_unlock(cpu_buffer, dolock);
6256 local_irq_restore(flags);
6257
6258 return ret;
6259 }
6260 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
6261
6262 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
6263 /**
6264 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
6265 * @buffer_a: One buffer to swap with
6266 * @buffer_b: The other buffer to swap with
6267 * @cpu: the CPU of the buffers to swap
6268 *
6269 * This function is useful for tracers that want to take a "snapshot"
6270 * of a CPU buffer and have another backup buffer lying around.
6271 * It is expected that the tracer handles the cpu buffer not being
6272 * used at the moment.
6273 */
6274 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
6275 struct trace_buffer *buffer_b, int cpu)
6276 {
6277 struct ring_buffer_per_cpu *cpu_buffer_a;
6278 struct ring_buffer_per_cpu *cpu_buffer_b;
6279 int ret = -EINVAL;
6280
6281 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
6282 !cpumask_test_cpu(cpu, buffer_b->cpumask))
6283 goto out;
6284
6285 cpu_buffer_a = buffer_a->buffers[cpu];
6286 cpu_buffer_b = buffer_b->buffers[cpu];
6287
6288 /* It's up to the callers to not try to swap mapped buffers */
6289 if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) {
6290 ret = -EBUSY;
6291 goto out;
6292 }
6293
6294 /* At least make sure the two buffers are somewhat the same */
6295 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
6296 goto out;
6297
6298 if (buffer_a->subbuf_order != buffer_b->subbuf_order)
6299 goto out;
6300
6301 ret = -EAGAIN;
6302
6303 if (atomic_read(&buffer_a->record_disabled))
6304 goto out;
6305
6306 if (atomic_read(&buffer_b->record_disabled))
6307 goto out;
6308
6309 if (atomic_read(&cpu_buffer_a->record_disabled))
6310 goto out;
6311
6312 if (atomic_read(&cpu_buffer_b->record_disabled))
6313 goto out;
6314
6315 /*
6316 * We can't do a synchronize_rcu here because this
6317 * function can be called in atomic context.
6318 * Normally this will be called from the same CPU as cpu.
6319 * If not it's up to the caller to protect this.
6320 */
6321 atomic_inc(&cpu_buffer_a->record_disabled);
6322 atomic_inc(&cpu_buffer_b->record_disabled);
6323
6324 ret = -EBUSY;
6325 if (local_read(&cpu_buffer_a->committing))
6326 goto out_dec;
6327 if (local_read(&cpu_buffer_b->committing))
6328 goto out_dec;
6329
6330 /*
6331 * When resize is in progress, we cannot swap it because
6332 * it will mess the state of the cpu buffer.
6333 */
6334 if (atomic_read(&buffer_a->resizing))
6335 goto out_dec;
6336 if (atomic_read(&buffer_b->resizing))
6337 goto out_dec;
6338
6339 buffer_a->buffers[cpu] = cpu_buffer_b;
6340 buffer_b->buffers[cpu] = cpu_buffer_a;
6341
6342 cpu_buffer_b->buffer = buffer_a;
6343 cpu_buffer_a->buffer = buffer_b;
6344
6345 ret = 0;
6346
6347 out_dec:
6348 atomic_dec(&cpu_buffer_a->record_disabled);
6349 atomic_dec(&cpu_buffer_b->record_disabled);
6350 out:
6351 return ret;
6352 }
6353 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
6354 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
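
/*
 * Illustrative sketch (editor's addition): the snapshot pattern that
 * ring_buffer_swap_cpu() exists for, available only under
 * CONFIG_RING_BUFFER_ALLOW_SWAP. max_buffer and main_buffer are
 * hypothetical caller-owned trace_buffer pointers; the spare "max"
 * buffer receives the snapshot while tracing continues into the
 * buffer swapped in.
 *
 *	int ret;
 *
 *	ret = ring_buffer_swap_cpu(max_buffer, main_buffer, cpu);
 *	if (ret == -EAGAIN || ret == -EBUSY)
 *		return ret;
 *
 * -EAGAIN (recording disabled) and -EBUSY (a commit, resize or
 * mapping in flight) are transient from the caller's point of view.
 */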
6355
6356 /**
6357 * ring_buffer_alloc_read_page - allocate a page to read from buffer
6358 * @buffer: the buffer to allocate for.
6359 * @cpu: the cpu buffer to allocate.
6360 *
6361 * This function is used in conjunction with ring_buffer_read_page.
6362 * When reading a full page from the ring buffer, these functions
6363 * can be used to speed up the process. The calling function should
6364 * allocate a few pages first with this function. Then when it
6365 * needs to get pages from the ring buffer, it passes the result
6366 * of this function into ring_buffer_read_page, which will swap
6367 * the page that was allocated with the read page of the buffer.
6368 *
6369 * Returns:
6370 * The page allocated, or ERR_PTR
6371 */
6372 struct buffer_data_read_page *
6373 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
6374 {
6375 struct ring_buffer_per_cpu *cpu_buffer;
6376 struct buffer_data_read_page *bpage = NULL;
6377 unsigned long flags;
6378 struct page *page;
6379
6380 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6381 return ERR_PTR(-ENODEV);
6382
6383 bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
6384 if (!bpage)
6385 return ERR_PTR(-ENOMEM);
6386
6387 bpage->order = buffer->subbuf_order;
6388 cpu_buffer = buffer->buffers[cpu];
6389 local_irq_save(flags);
6390 arch_spin_lock(&cpu_buffer->lock);
6391
6392 if (cpu_buffer->free_page) {
6393 bpage->data = cpu_buffer->free_page;
6394 cpu_buffer->free_page = NULL;
6395 }
6396
6397 arch_spin_unlock(&cpu_buffer->lock);
6398 local_irq_restore(flags);
6399
6400 if (bpage->data)
6401 goto out;
6402
6403 page = alloc_pages_node(cpu_to_node(cpu),
6404 GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
6405 cpu_buffer->buffer->subbuf_order);
6406 if (!page) {
6407 kfree(bpage);
6408 return ERR_PTR(-ENOMEM);
6409 }
6410
6411 bpage->data = page_address(page);
6412
6413 out:
6414 rb_init_page(bpage->data);
6415
6416 return bpage;
6417 }
6418 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
6419
6420 /**
6421 * ring_buffer_free_read_page - free an allocated read page
6422 * @buffer: the buffer the page was allocated for
6423 * @cpu: the cpu buffer the page came from
6424 * @data_page: the page to free
6425 *
6426 * Free a page allocated from ring_buffer_alloc_read_page.
6427 */
6428 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
6429 struct buffer_data_read_page *data_page)
6430 {
6431 struct ring_buffer_per_cpu *cpu_buffer;
6432 struct buffer_data_page *bpage = data_page->data;
6433 struct page *page = virt_to_page(bpage);
6434 unsigned long flags;
6435
6436 if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
6437 return;
6438
6439 cpu_buffer = buffer->buffers[cpu];
6440
6441 /*
6442 * If the page is still in use someplace else, or if the order of
6443 * the page is different from the subbuffer order of the buffer,
6444 * we can't reuse it.
6445 */
6446 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
6447 goto out;
6448
6449 local_irq_save(flags);
6450 arch_spin_lock(&cpu_buffer->lock);
6451
6452 if (!cpu_buffer->free_page) {
6453 cpu_buffer->free_page = bpage;
6454 bpage = NULL;
6455 }
6456
6457 arch_spin_unlock(&cpu_buffer->lock);
6458 local_irq_restore(flags);
6459
6460 out:
6461 free_pages((unsigned long)bpage, data_page->order);
6462 kfree(data_page);
6463 }
6464 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
6465
6466 /**
6467 * ring_buffer_read_page - extract a page from the ring buffer
6468 * @buffer: buffer to extract from
6469 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
6470 * @len: amount to extract
6471 * @cpu: the cpu of the buffer to extract
6472 * @full: should the extraction only happen when the page is full.
6473 *
6474 * This function will pull out a page from the ring buffer and consume it.
6475 * @data_page must be the address of the variable that was returned
6476 * from ring_buffer_alloc_read_page. This is because the page might be used
6477 * to swap with a page in the ring buffer.
6478 *
6479 * for example:
6480 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
6481 * if (IS_ERR(rpage))
6482 * return PTR_ERR(rpage);
6483 * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
6484 * if (ret >= 0)
6485 * process_page(ring_buffer_read_page_data(rpage), ret);
6486 * ring_buffer_free_read_page(buffer, cpu, rpage);
6487 *
6488 * When @full is set, the function will not succeed unless
6489 * the writer is off the reader page.
6490 *
6491 * Note: it is up to the calling functions to handle sleeps and wakeups.
6492 * The ring buffer can be used anywhere in the kernel and can not
6493 * blindly call wake_up. The layer that uses the ring buffer must be
6494 * responsible for that.
6495 *
6496 * Returns:
6497 * >=0 if data has been transferred, returns the offset of consumed data.
6498 * <0 if no data has been transferred.
6499 */
6500 int ring_buffer_read_page(struct trace_buffer *buffer,
6501 struct buffer_data_read_page *data_page,
6502 size_t len, int cpu, int full)
6503 {
6504 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
6505 struct ring_buffer_event *event;
6506 struct buffer_data_page *bpage;
6507 struct buffer_page *reader;
6508 unsigned long missed_events;
6509 unsigned long flags;
6510 unsigned int commit;
6511 unsigned int read;
6512 u64 save_timestamp;
6513 int ret = -1;
6514
6515 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6516 goto out;
6517
6518 /*
6519 * If len is not big enough to hold the page header, then
6520 * we can not copy anything.
6521 */
6522 if (len <= BUF_PAGE_HDR_SIZE)
6523 goto out;
6524
6525 len -= BUF_PAGE_HDR_SIZE;
6526
6527 if (!data_page || !data_page->data)
6528 goto out;
6529 if (data_page->order != buffer->subbuf_order)
6530 goto out;
6531
6532 bpage = data_page->data;
6533 if (!bpage)
6534 goto out;
6535
6536 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6537
6538 reader = rb_get_reader_page(cpu_buffer);
6539 if (!reader)
6540 goto out_unlock;
6541
6542 event = rb_reader_event(cpu_buffer);
6543
6544 read = reader->read;
6545 commit = rb_page_size(reader);
6546
6547 /* Check if any events were dropped */
6548 missed_events = cpu_buffer->lost_events;
6549
6550 /*
6551 * If this page has been partially read or
6552 * if len is not big enough to read the rest of the page or
6553 * a writer is still on the page, then
6554 * we must copy the data from the page to the buffer.
6555 * Otherwise, we can simply swap the page with the one passed in.
6556 */
6557 if (read || (len < (commit - read)) ||
6558 cpu_buffer->reader_page == cpu_buffer->commit_page ||
6559 cpu_buffer->mapped) {
6560 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
6561 unsigned int rpos = read;
6562 unsigned int pos = 0;
6563 unsigned int size;
6564
6565 /*
6566 * If a full page is expected, this can still be returned
6567 * if there's been a previous partial read and the
6568 * rest of the page can be read and the commit page is off
6569 * the reader page.
6570 */
6571 if (full &&
6572 (!read || (len < (commit - read)) ||
6573 cpu_buffer->reader_page == cpu_buffer->commit_page))
6574 goto out_unlock;
6575
6576 if (len > (commit - read))
6577 len = (commit - read);
6578
6579 /* Always keep the time extend and data together */
6580 size = rb_event_ts_length(event);
6581
6582 if (len < size)
6583 goto out_unlock;
6584
6585 /* save the current timestamp, since the user will need it */
6586 save_timestamp = cpu_buffer->read_stamp;
6587
6588 /* Need to copy one event at a time */
6589 do {
6590 /*
 * We need the size of one event, because
6591 * rb_advance_reader only advances by one event,
6592 * whereas rb_event_ts_length may include the size of
6593 * one or two events.
6594 * We have already ensured there's enough space if this
6595 * is a time extend.
 */
6596 size = rb_event_length(event);
6597 memcpy(bpage->data + pos, rpage->data + rpos, size);
6598
6599 len -= size;
6600
6601 rb_advance_reader(cpu_buffer);
6602 rpos = reader->read;
6603 pos += size;
6604
6605 if (rpos >= commit)
6606 break;
6607
6608 event = rb_reader_event(cpu_buffer);
6609 /* Always keep the time extend and data together */
6610 size = rb_event_ts_length(event);
6611 } while (len >= size);
6612
6613 /* update bpage */
6614 local_set(&bpage->commit, pos);
6615 bpage->time_stamp = save_timestamp;
6616
6617 /* we copied everything to the beginning */
6618 read = 0;
6619 } else {
6620 /* update the entry counter */
6621 cpu_buffer->read += rb_page_entries(reader);
6622 cpu_buffer->read_bytes += rb_page_size(reader);
6623
6624 /* swap the pages */
6625 rb_init_page(bpage);
6626 bpage = reader->page;
6627 reader->page = data_page->data;
6628 local_set(&reader->write, 0);
6629 local_set(&reader->entries, 0);
6630 reader->read = 0;
6631 data_page->data = bpage;
6632
6633 /*
6634 * Use the real_end for the data size,
6635 * This gives us a chance to store the lost events
6636 * on the page.
6637 */
6638 if (reader->real_end)
6639 local_set(&bpage->commit, reader->real_end);
6640 }
6641 ret = read;
6642
6643 cpu_buffer->lost_events = 0;
6644
6645 commit = local_read(&bpage->commit);
6646 /*
6647 * Set a flag in the commit field if we lost events
6648 */
6649 if (missed_events) {
6650 /*
 * If there is room at the end of the page to save the
6651 * missed events, then record it there.
6652 */
6653 if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
6654 memcpy(&bpage->data[commit], &missed_events,
6655 sizeof(missed_events));
6656 local_add(RB_MISSED_STORED, &bpage->commit);
6657 commit += sizeof(missed_events);
6658 }
6659 local_add(RB_MISSED_EVENTS, &bpage->commit);
6660 }
6661
6662 /*
6663 * This page may be off to user land. Zero it out here.
6664 */
6665 if (commit < buffer->subbuf_size)
6666 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
6667
6668 out_unlock:
6669 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6670
6671 out:
6672 return ret;
6673 }
6674 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
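
/*
 * Illustrative sketch (editor's addition): decoding the commit field
 * of a page returned by ring_buffer_read_page(), per the
 * missed_events handling above. The top bits carry flags; when
 * RB_MISSED_STORED is set, the count of lost events sits just past
 * the data. "rpage" is the page from ring_buffer_alloc_read_page()
 * as in the example above.
 *
 *	struct buffer_data_page *bpage = ring_buffer_read_page_data(rpage);
 *	unsigned long commit = local_read(&bpage->commit);
 *	unsigned long size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *	unsigned long missed = 0;
 *
 *	if ((commit & RB_MISSED_EVENTS) && (commit & RB_MISSED_STORED))
 *		memcpy(&missed, &bpage->data[size], sizeof(missed));
 */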
6675
6676 /**
6677 * ring_buffer_read_page_data - get pointer to the data in the page.
6678 * @page: the page to get the data from
6679 *
6680 * Returns pointer to the actual data in this page.
6681 */
6682 void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
6683 {
6684 return page->data;
6685 }
6686 EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
6687
6688 /**
6689 * ring_buffer_subbuf_size_get - get size of the sub buffer.
6690 * @buffer: the buffer to get the sub buffer size from
6691 *
6692 * Returns size of the sub buffer, in bytes.
6693 */
6694 int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
6695 {
6696 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6697 }
6698 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
6699
6700 /**
6701 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
6702 * @buffer: The ring_buffer to get the system sub page order from
6703 *
6704 * By default, one ring buffer sub page equals one system page. This parameter
6705 * is configurable, per ring buffer. The size of the ring buffer sub page can be
6706 * extended, but must be an order of the system page size.
6707 *
6708 * Returns the order of buffer sub page size, in system pages:
6709 * 0 means the sub buffer size is 1 system page and so forth.
6710 * In case of an error < 0 is returned.
6711 */
6712 int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
6713 {
6714 if (!buffer)
6715 return -EINVAL;
6716
6717 return buffer->subbuf_order;
6718 }
6719 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
6720
6721 /**
6722 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
6723 * @buffer: The ring_buffer to set the new page size.
6724 * @order: Order of the system pages in one sub buffer page
6725 *
6726 * By default, one ring buffer page equals one system page. This API can be
6727 * used to set a new size of the ring buffer page. The size must be an order of
6728 * the system page size, which is why the input parameter @order is the order of
6729 * system pages that are allocated for one ring buffer page:
6730 * 0 - 1 system page
6731 * 1 - 2 system pages
6732 * 2 - 4 system pages
6733 * ...
6734 *
6735 * Returns 0 on success or < 0 in case of an error.
6736 */
6737 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
6738 {
6739 struct ring_buffer_per_cpu *cpu_buffer;
6740 struct buffer_page *bpage, *tmp;
6741 int old_order, old_size;
6742 int nr_pages;
6743 int psize;
6744 int err;
6745 int cpu;
6746
6747 if (!buffer || order < 0)
6748 return -EINVAL;
6749
6750 if (buffer->subbuf_order == order)
6751 return 0;
6752
6753 psize = (1 << order) * PAGE_SIZE;
6754 if (psize <= BUF_PAGE_HDR_SIZE)
6755 return -EINVAL;
6756
6757 /* Size of a subbuf cannot be greater than the write counter */
6758 if (psize > RB_WRITE_MASK + 1)
6759 return -EINVAL;
6760
6761 old_order = buffer->subbuf_order;
6762 old_size = buffer->subbuf_size;
6763
6764 /* prevent another thread from changing buffer sizes */
6765 mutex_lock(&buffer->mutex);
6766 atomic_inc(&buffer->record_disabled);
6767
6768 /* Make sure all commits have finished */
6769 synchronize_rcu();
6770
6771 buffer->subbuf_order = order;
6772 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
6773
6774 /* Make sure all new buffers are allocated, before deleting the old ones */
6775 for_each_buffer_cpu(buffer, cpu) {
6776
6777 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6778 continue;
6779
6780 cpu_buffer = buffer->buffers[cpu];
6781
6782 if (cpu_buffer->mapped) {
6783 err = -EBUSY;
6784 goto error;
6785 }
6786
6787 /* Update the number of pages to match the new size */
6788 nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
6789 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
6790
6791 /* we need a minimum of two pages */
6792 if (nr_pages < 2)
6793 nr_pages = 2;
6794
6795 cpu_buffer->nr_pages_to_update = nr_pages;
6796
6797 /* Include the reader page */
6798 nr_pages++;
6799
6800 /* Allocate the new size buffer */
6801 INIT_LIST_HEAD(&cpu_buffer->new_pages);
6802 if (__rb_allocate_pages(cpu_buffer, nr_pages,
6803 &cpu_buffer->new_pages)) {
6804 /* not enough memory for new pages */
6805 err = -ENOMEM;
6806 goto error;
6807 }
6808 }
6809
6810 for_each_buffer_cpu(buffer, cpu) {
6811 struct buffer_data_page *old_free_data_page;
6812 struct list_head old_pages;
6813 unsigned long flags;
6814
6815 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6816 continue;
6817
6818 cpu_buffer = buffer->buffers[cpu];
6819
6820 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6821
6822 /* Clear the head bit to make the link list normal to read */
6823 rb_head_page_deactivate(cpu_buffer);
6824
6825 /*
6826 * Collect buffers from the cpu_buffer pages list and the
6827 * reader_page on old_pages, so they can be freed later when not
6828 * under a spinlock. The pages list is a linked list with no
6829 * head, adding old_pages turns it into a regular list with
6830 * old_pages being the head.
6831 */
6832 list_add(&old_pages, cpu_buffer->pages);
6833 list_add(&cpu_buffer->reader_page->list, &old_pages);
6834
6835 /* One page was allocated for the reader page */
6836 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
6837 struct buffer_page, list);
6838 list_del_init(&cpu_buffer->reader_page->list);
6839
6840 /* Install the new pages, remove the head from the list */
6841 cpu_buffer->pages = cpu_buffer->new_pages.next;
6842 list_del_init(&cpu_buffer->new_pages);
6843 cpu_buffer->cnt++;
6844
6845 cpu_buffer->head_page
6846 = list_entry(cpu_buffer->pages, struct buffer_page, list);
6847 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
6848
6849 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
6850 cpu_buffer->nr_pages_to_update = 0;
6851
6852 old_free_data_page = cpu_buffer->free_page;
6853 cpu_buffer->free_page = NULL;
6854
6855 rb_head_page_activate(cpu_buffer);
6856
6857 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6858
6859 /* Free old sub buffers */
6860 list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
6861 list_del_init(&bpage->list);
6862 free_buffer_page(bpage);
6863 }
6864 free_pages((unsigned long)old_free_data_page, old_order);
6865
6866 rb_check_pages(cpu_buffer);
6867 }
6868
6869 atomic_dec(&buffer->record_disabled);
6870 mutex_unlock(&buffer->mutex);
6871
6872 return 0;
6873
6874 error:
6875 buffer->subbuf_order = old_order;
6876 buffer->subbuf_size = old_size;
6877
6878 atomic_dec(&buffer->record_disabled);
6879 mutex_unlock(&buffer->mutex);
6880
6881 for_each_buffer_cpu(buffer, cpu) {
6882 cpu_buffer = buffer->buffers[cpu];
6883
6884 if (!cpu_buffer->nr_pages_to_update)
6885 continue;
6886
6887 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6888 list_del_init(&bpage->list);
6889 free_buffer_page(bpage);
6890 }
6891 }
6892
6893 return err;
6894 }
6895 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
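
/*
 * Illustrative sketch (editor's addition): switching a buffer to
 * 4-page (order 2) sub-buffers and reading back the result. On a
 * system with 4K pages this yields 16K sub-buffers, header included.
 *
 *	int ret = ring_buffer_subbuf_order_set(buffer, 2);
 *
 *	if (!ret)
 *		pr_info("subbuf: order %d, %d bytes\n",
 *			ring_buffer_subbuf_order_get(buffer),
 *			ring_buffer_subbuf_size_get(buffer));
 */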
6896
6897 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6898 {
6899 struct page *page;
6900
6901 if (cpu_buffer->meta_page)
6902 return 0;
6903
6904 page = alloc_page(GFP_USER | __GFP_ZERO);
6905 if (!page)
6906 return -ENOMEM;
6907
6908 cpu_buffer->meta_page = page_to_virt(page);
6909
6910 return 0;
6911 }
6912
6913 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6914 {
6915 unsigned long addr = (unsigned long)cpu_buffer->meta_page;
6916
6917 free_page(addr);
6918 cpu_buffer->meta_page = NULL;
6919 }
6920
6921 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
6922 unsigned long *subbuf_ids)
6923 {
6924 struct trace_buffer_meta *meta = cpu_buffer->meta_page;
6925 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
6926 struct buffer_page *first_subbuf, *subbuf;
6927 int id = 0;
6928
6929 subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page;
6930 cpu_buffer->reader_page->id = id++;
6931
6932 first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
6933 do {
6934 if (WARN_ON(id >= nr_subbufs))
6935 break;
6936
6937 subbuf_ids[id] = (unsigned long)subbuf->page;
6938 subbuf->id = id;
6939
6940 rb_inc_page(&subbuf);
6941 id++;
6942 } while (subbuf != first_subbuf);
6943
6944 /* install subbuf ID to kern VA translation */
6945 cpu_buffer->subbuf_ids = subbuf_ids;
6946
6947 meta->meta_struct_len = sizeof(*meta);
6948 meta->nr_subbufs = nr_subbufs;
6949 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6950 meta->meta_page_size = meta->subbuf_size;
6951
6952 rb_update_meta_page(cpu_buffer);
6953 }
6954
6955 static struct ring_buffer_per_cpu *
6956 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
6957 {
6958 struct ring_buffer_per_cpu *cpu_buffer;
6959
6960 if (!cpumask_test_cpu(cpu, buffer->cpumask))
6961 return ERR_PTR(-EINVAL);
6962
6963 cpu_buffer = buffer->buffers[cpu];
6964
6965 mutex_lock(&cpu_buffer->mapping_lock);
6966
6967 if (!cpu_buffer->user_mapped) {
6968 mutex_unlock(&cpu_buffer->mapping_lock);
6969 return ERR_PTR(-ENODEV);
6970 }
6971
6972 return cpu_buffer;
6973 }
6974
6975 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6976 {
6977 mutex_unlock(&cpu_buffer->mapping_lock);
6978 }
6979
6980 /*
6981 * Fast-path for ring_buffer_(un)map(). Called whenever the meta-page doesn't
6982 * need to be set up or torn down.
6983 */
6984 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
6985 bool inc)
6986 {
6987 unsigned long flags;
6988
6989 lockdep_assert_held(&cpu_buffer->mapping_lock);
6990
6991 /* mapped is always greater than or equal to user_mapped */
6992 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
6993 return -EINVAL;
6994
6995 if (inc && cpu_buffer->mapped == UINT_MAX)
6996 return -EBUSY;
6997
6998 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
6999 return -EINVAL;
7000
7001 mutex_lock(&cpu_buffer->buffer->mutex);
7002 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7003
7004 if (inc) {
7005 cpu_buffer->user_mapped++;
7006 cpu_buffer->mapped++;
7007 } else {
7008 cpu_buffer->user_mapped--;
7009 cpu_buffer->mapped--;
7010 }
7011
7012 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7013 mutex_unlock(&cpu_buffer->buffer->mutex);
7014
7015 return 0;
7016 }
7017
7018 /*
7019 * +--------------+ pgoff == 0
7020 * | meta page |
7021 * +--------------+ pgoff == 1
7022 * | subbuffer 0 |
7023 * | |
7024 * +--------------+ pgoff == (1 + (1 << subbuf_order))
7025 * | subbuffer 1 |
7026 * | |
7027 * ...
7028 */
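/*
 * A rough user-space sketch of consuming that layout (hypothetical fd and
 * variable names; only the trace_buffer_meta fields published above are
 * assumed):
 *
 *	meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
 *	data = mmap(NULL, (size_t)meta->nr_subbufs * meta->subbuf_size,
 *		    PROT_READ, MAP_SHARED, fd, meta->meta_page_size);
 *	subbuf = data + s * meta->subbuf_size;	-- start of sub-buffer s
 */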
7029 #ifdef CONFIG_MMU
7030 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7031 struct vm_area_struct *vma)
7032 {
7033 unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
7034 unsigned int subbuf_pages, subbuf_order;
7035 struct page **pages;
7036 int p = 0, s = 0;
7037 int err;
7038
7039 	/* Refuse MAP_PRIVATE or writable mappings */
7040 if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
7041 !(vma->vm_flags & VM_MAYSHARE))
7042 return -EPERM;
7043
7044 subbuf_order = cpu_buffer->buffer->subbuf_order;
7045 subbuf_pages = 1 << subbuf_order;
7046
7047 if (subbuf_order && pgoff % subbuf_pages)
7048 return -EINVAL;
7049
7050 /*
7051 * Make sure the mapping cannot become writable later. Also tell the VM
7052 * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
7053 */
7054 vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP,
7055 VM_MAYWRITE);
7056
7057 lockdep_assert_held(&cpu_buffer->mapping_lock);
7058
7059 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
7060 nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */
7061 if (nr_pages <= pgoff)
7062 return -EINVAL;
7063
7064 nr_pages -= pgoff;
7065
7066 nr_vma_pages = vma_pages(vma);
7067 if (!nr_vma_pages || nr_vma_pages > nr_pages)
7068 return -EINVAL;
7069
7070 nr_pages = nr_vma_pages;
7071
7072 pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
7073 if (!pages)
7074 return -ENOMEM;
7075
7076 if (!pgoff) {
7077 unsigned long meta_page_padding;
7078
7079 pages[p++] = virt_to_page(cpu_buffer->meta_page);
7080
7081 /*
7082 * Pad with the zero-page to align the meta-page with the
7083 * sub-buffers.
7084 */
7085 meta_page_padding = subbuf_pages - 1;
7086 while (meta_page_padding-- && p < nr_pages) {
7087 unsigned long __maybe_unused zero_addr =
7088 vma->vm_start + (PAGE_SIZE * p);
7089
7090 pages[p++] = ZERO_PAGE(zero_addr);
7091 }
7092 } else {
7093 /* Skip the meta-page */
7094 pgoff -= subbuf_pages;
7095
7096 s += pgoff / subbuf_pages;
7097 }
7098
7099 while (p < nr_pages) {
7100 struct page *page;
7101 int off = 0;
7102
7103 if (WARN_ON_ONCE(s >= nr_subbufs)) {
7104 err = -EINVAL;
7105 goto out;
7106 }
7107
7108 page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
7109
7110 for (; off < (1 << (subbuf_order)); off++, page++) {
7111 if (p >= nr_pages)
7112 break;
7113
7114 pages[p++] = page;
7115 }
7116 s++;
7117 }
7118
7119 err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
7120
7121 out:
7122 kfree(pages);
7123
7124 return err;
7125 }
7126 #else
7127 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7128 struct vm_area_struct *vma)
7129 {
7130 return -EOPNOTSUPP;
7131 }
7132 #endif
7133
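/*
 * ring_buffer_map - map a per-CPU ring buffer into a user address space
 * @buffer: the buffer to map
 * @cpu: the CPU whose per-CPU buffer to map
 * @vma: the user VMA to populate
 *
 * On the first mapping, allocate the meta-page and the subbuf ID table and
 * pin the buffer size (resize_disabled); further mappings only take an extra
 * reference. Returns 0 on success or a negative errno.
 */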
7134 int ring_buffer_map(struct trace_buffer *buffer, int cpu,
7135 struct vm_area_struct *vma)
7136 {
7137 struct ring_buffer_per_cpu *cpu_buffer;
7138 unsigned long flags, *subbuf_ids;
7139 int err = 0;
7140
7141 if (!cpumask_test_cpu(cpu, buffer->cpumask))
7142 return -EINVAL;
7143
7144 cpu_buffer = buffer->buffers[cpu];
7145
7146 mutex_lock(&cpu_buffer->mapping_lock);
7147
7148 if (cpu_buffer->user_mapped) {
7149 err = __rb_map_vma(cpu_buffer, vma);
7150 if (!err)
7151 err = __rb_inc_dec_mapped(cpu_buffer, true);
7152 mutex_unlock(&cpu_buffer->mapping_lock);
7153 return err;
7154 }
7155
7156 /* prevent another thread from changing buffer/sub-buffer sizes */
7157 mutex_lock(&buffer->mutex);
7158
7159 err = rb_alloc_meta_page(cpu_buffer);
7160 if (err)
7161 goto unlock;
7162
7163 /* subbuf_ids include the reader while nr_pages does not */
7164 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
7165 if (!subbuf_ids) {
7166 rb_free_meta_page(cpu_buffer);
7167 err = -ENOMEM;
7168 goto unlock;
7169 }
7170
7171 atomic_inc(&cpu_buffer->resize_disabled);
7172
7173 /*
7174 * Lock all readers to block any subbuf swap until the subbuf IDs are
7175 * assigned.
7176 */
7177 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7178 rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
7179
7180 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7181
7182 err = __rb_map_vma(cpu_buffer, vma);
7183 if (!err) {
7184 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7185 /* This is the first time it is mapped by user */
7186 cpu_buffer->mapped++;
7187 cpu_buffer->user_mapped = 1;
7188 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7189 } else {
7190 kfree(cpu_buffer->subbuf_ids);
7191 cpu_buffer->subbuf_ids = NULL;
7192 rb_free_meta_page(cpu_buffer);
7193 atomic_dec(&cpu_buffer->resize_disabled);
7194 }
7195
7196 unlock:
7197 mutex_unlock(&buffer->mutex);
7198 mutex_unlock(&cpu_buffer->mapping_lock);
7199
7200 return err;
7201 }
7202
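/*
 * ring_buffer_unmap - drop one user mapping of a per-CPU ring buffer
 * @buffer: the buffer being unmapped
 * @cpu: the CPU whose per-CPU buffer to unmap
 *
 * Decrements the user mapping count; on the last unmap, frees the subbuf ID
 * table and the meta-page and re-enables resizing. Returns 0 on success or a
 * negative errno.
 */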
7203 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
7204 {
7205 struct ring_buffer_per_cpu *cpu_buffer;
7206 unsigned long flags;
7207 int err = 0;
7208
7209 if (!cpumask_test_cpu(cpu, buffer->cpumask))
7210 return -EINVAL;
7211
7212 cpu_buffer = buffer->buffers[cpu];
7213
7214 mutex_lock(&cpu_buffer->mapping_lock);
7215
7216 if (!cpu_buffer->user_mapped) {
7217 err = -ENODEV;
7218 goto out;
7219 } else if (cpu_buffer->user_mapped > 1) {
7220 __rb_inc_dec_mapped(cpu_buffer, false);
7221 goto out;
7222 }
7223
7224 mutex_lock(&buffer->mutex);
7225 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7226
7227 /* This is the last user space mapping */
7228 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
7229 cpu_buffer->mapped--;
7230 cpu_buffer->user_mapped = 0;
7231
7232 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7233
7234 kfree(cpu_buffer->subbuf_ids);
7235 cpu_buffer->subbuf_ids = NULL;
7236 rb_free_meta_page(cpu_buffer);
7237 atomic_dec(&cpu_buffer->resize_disabled);
7238
7239 mutex_unlock(&buffer->mutex);
7240
7241 out:
7242 mutex_unlock(&cpu_buffer->mapping_lock);
7243
7244 return err;
7245 }
7246
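/*
 * ring_buffer_map_get_reader - advance the reader of a user-mapped buffer
 * @buffer: the buffer to read from
 * @cpu: the CPU whose per-CPU buffer to advance
 *
 * Consume whatever remains on the current reader page, swap in the next
 * sub-buffer, record any missed events on the page, flush the reader page
 * for non-coherent archs and refresh the meta-page for user-space.
 */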
7247 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
7248 {
7249 struct ring_buffer_per_cpu *cpu_buffer;
7250 struct buffer_page *reader;
7251 unsigned long missed_events;
7252 unsigned long reader_size;
7253 unsigned long flags;
7254
7255 cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
7256 if (IS_ERR(cpu_buffer))
7257 return (int)PTR_ERR(cpu_buffer);
7258
7259 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7260
7261 consume:
7262 if (rb_per_cpu_empty(cpu_buffer))
7263 goto out;
7264
7265 reader_size = rb_page_size(cpu_buffer->reader_page);
7266
7267 /*
7268 	 * There is data to be read on the current reader page. We can return
7269 	 * to the caller, but before that we assume the caller will read
7270 	 * everything. Let's update the kernel reader accordingly.
7271 */
7272 if (cpu_buffer->reader_page->read < reader_size) {
7273 while (cpu_buffer->reader_page->read < reader_size)
7274 rb_advance_reader(cpu_buffer);
7275 goto out;
7276 }
7277
7278 reader = rb_get_reader_page(cpu_buffer);
7279 if (WARN_ON(!reader))
7280 goto out;
7281
7282 /* Check if any events were dropped */
7283 missed_events = cpu_buffer->lost_events;
7284
7285 if (cpu_buffer->reader_page != cpu_buffer->commit_page) {
7286 if (missed_events) {
7287 struct buffer_data_page *bpage = reader->page;
7288 unsigned int commit;
7289 /*
7290 * Use the real_end for the data size,
7291 * This gives us a chance to store the lost events
7292 * on the page.
7293 */
7294 if (reader->real_end)
7295 local_set(&bpage->commit, reader->real_end);
7296 /*
7297 * If there is room at the end of the page to save the
7298 * missed events, then record it there.
7299 */
7300 commit = rb_page_size(reader);
7301 if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
7302 memcpy(&bpage->data[commit], &missed_events,
7303 sizeof(missed_events));
7304 local_add(RB_MISSED_STORED, &bpage->commit);
7305 }
7306 local_add(RB_MISSED_EVENTS, &bpage->commit);
7307 }
7308 } else {
7309 /*
7310 * There really shouldn't be any missed events if the commit
7311 * is on the reader page.
7312 */
7313 WARN_ON_ONCE(missed_events);
7314 }
7315
7316 cpu_buffer->lost_events = 0;
7317
7318 goto consume;
7319
7320 out:
7321 /* Some archs do not have data cache coherency between kernel and user-space */
7322 flush_kernel_vmap_range(cpu_buffer->reader_page->page,
7323 buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
7324
7325 rb_update_meta_page(cpu_buffer);
7326
7327 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7328 rb_put_mapped_buffer(cpu_buffer);
7329
7330 return 0;
7331 }
7332
7333 /*
7334 * We only allocate new buffers, never free them if the CPU goes down.
7335 * If we were to free the buffer, then the user would lose any trace that was in
7336 * the buffer.
7337 */
7338 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
7339 {
7340 struct trace_buffer *buffer;
7341 long nr_pages_same;
7342 int cpu_i;
7343 unsigned long nr_pages;
7344
7345 buffer = container_of(node, struct trace_buffer, node);
7346 if (cpumask_test_cpu(cpu, buffer->cpumask))
7347 return 0;
7348
7349 nr_pages = 0;
7350 nr_pages_same = 1;
7351 /* check if all cpu sizes are same */
7352 for_each_buffer_cpu(buffer, cpu_i) {
7353 /* fill in the size from first enabled cpu */
7354 if (nr_pages == 0)
7355 nr_pages = buffer->buffers[cpu_i]->nr_pages;
7356 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
7357 nr_pages_same = 0;
7358 break;
7359 }
7360 }
7361 /* allocate minimum pages, user can later expand it */
7362 if (!nr_pages_same)
7363 nr_pages = 2;
7364 buffer->buffers[cpu] =
7365 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
7366 if (!buffer->buffers[cpu]) {
7367 WARN(1, "failed to allocate ring buffer on CPU %u\n",
7368 cpu);
7369 return -ENOMEM;
7370 }
7371 smp_wmb();
7372 cpumask_set_cpu(cpu, buffer->cpumask);
7373 return 0;
7374 }
7375
7376 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
7377 /*
7378 * This is a basic integrity check of the ring buffer.
7379 * Late in the boot cycle this test will run when configured in.
7380 * It will kick off a thread per CPU that will go into a loop
7381 * writing to the per cpu ring buffer various sizes of data.
7382 * Some of the data will be large items, some small.
7383 *
7384 * Another thread is created that goes into a spin, sending out
7385 * IPIs to the other CPUs to also write into the ring buffer.
7386  * This is to test the nesting ability of the buffer.
7387 *
7388 * Basic stats are recorded and reported. If something in the
7389 * ring buffer should happen that's not expected, a big warning
7390 * is displayed and all ring buffers are disabled.
7391 */
7392 static struct task_struct *rb_threads[NR_CPUS] __initdata;
7393
7394 struct rb_test_data {
7395 struct trace_buffer *buffer;
7396 unsigned long events;
7397 unsigned long bytes_written;
7398 unsigned long bytes_alloc;
7399 unsigned long bytes_dropped;
7400 unsigned long events_nested;
7401 unsigned long bytes_written_nested;
7402 unsigned long bytes_alloc_nested;
7403 unsigned long bytes_dropped_nested;
7404 int min_size_nested;
7405 int max_size_nested;
7406 int max_size;
7407 int min_size;
7408 int cpu;
7409 int cnt;
7410 };
7411
7412 static struct rb_test_data rb_data[NR_CPUS] __initdata;
7413
7414 /* 1 meg per cpu */
7415 #define RB_TEST_BUFFER_SIZE 1048576
7416
7417 static char rb_string[] __initdata =
7418 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
7419 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
7420 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
7421
7422 static bool rb_test_started __initdata;
7423
7424 struct rb_item {
7425 int size;
7426 char str[];
7427 };
7428
7429 static __init int rb_write_something(struct rb_test_data *data, bool nested)
7430 {
7431 struct ring_buffer_event *event;
7432 struct rb_item *item;
7433 bool started;
7434 int event_len;
7435 int size;
7436 int len;
7437 int cnt;
7438
7439 	/* Have nested writes differ from what is normally written */
7440 cnt = data->cnt + (nested ? 27 : 0);
7441
7442 	/* Multiply cnt by ~e (68/25 ~= 2.72) to make a somewhat unique increment */
7443 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
7444
7445 len = size + sizeof(struct rb_item);
7446
7447 started = rb_test_started;
7448 /* read rb_test_started before checking buffer enabled */
7449 smp_rmb();
7450
7451 event = ring_buffer_lock_reserve(data->buffer, len);
7452 if (!event) {
7453 /* Ignore dropped events before test starts. */
7454 if (started) {
7455 if (nested)
7456 data->bytes_dropped_nested += len;
7457 else
7458 data->bytes_dropped += len;
7459 }
7460 return len;
7461 }
7462
7463 event_len = ring_buffer_event_length(event);
7464
7465 if (RB_WARN_ON(data->buffer, event_len < len))
7466 goto out;
7467
7468 item = ring_buffer_event_data(event);
7469 item->size = size;
7470 memcpy(item->str, rb_string, size);
7471
7472 if (nested) {
7473 data->bytes_alloc_nested += event_len;
7474 data->bytes_written_nested += len;
7475 data->events_nested++;
7476 if (!data->min_size_nested || len < data->min_size_nested)
7477 data->min_size_nested = len;
7478 if (len > data->max_size_nested)
7479 data->max_size_nested = len;
7480 } else {
7481 data->bytes_alloc += event_len;
7482 data->bytes_written += len;
7483 data->events++;
7484 		if (!data->min_size || len < data->min_size)
7485 			data->min_size = len;
7486 if (len > data->max_size)
7487 data->max_size = len;
7488 }
7489
7490 out:
7491 ring_buffer_unlock_commit(data->buffer);
7492
7493 return 0;
7494 }
7495
7496 static __init int rb_test(void *arg)
7497 {
7498 struct rb_test_data *data = arg;
7499
7500 while (!kthread_should_stop()) {
7501 rb_write_something(data, false);
7502 data->cnt++;
7503
7504 set_current_state(TASK_INTERRUPTIBLE);
7505 /* Now sleep between a min of 100-300us and a max of 1ms */
7506 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
7507 }
7508
7509 return 0;
7510 }
7511
7512 static __init void rb_ipi(void *ignore)
7513 {
7514 struct rb_test_data *data;
7515 int cpu = smp_processor_id();
7516
7517 data = &rb_data[cpu];
7518 rb_write_something(data, true);
7519 }
7520
7521 static __init int rb_hammer_test(void *arg)
7522 {
7523 while (!kthread_should_stop()) {
7524
7525 /* Send an IPI to all cpus to write data! */
7526 smp_call_function(rb_ipi, NULL, 1);
7527 		/* No sleep, but on non-preempt kernels, let others run */
7528 schedule();
7529 }
7530
7531 return 0;
7532 }
7533
7534 static __init int test_ringbuffer(void)
7535 {
7536 struct task_struct *rb_hammer;
7537 struct trace_buffer *buffer;
7538 int cpu;
7539 int ret = 0;
7540
7541 if (security_locked_down(LOCKDOWN_TRACEFS)) {
7542 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
7543 return 0;
7544 }
7545
7546 pr_info("Running ring buffer tests...\n");
7547
7548 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
7549 if (WARN_ON(!buffer))
7550 return 0;
7551
7552 /* Disable buffer so that threads can't write to it yet */
7553 ring_buffer_record_off(buffer);
7554
7555 for_each_online_cpu(cpu) {
7556 rb_data[cpu].buffer = buffer;
7557 rb_data[cpu].cpu = cpu;
7558 rb_data[cpu].cnt = cpu;
7559 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
7560 cpu, "rbtester/%u");
7561 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
7562 pr_cont("FAILED\n");
7563 ret = PTR_ERR(rb_threads[cpu]);
7564 goto out_free;
7565 }
7566 }
7567
7568 /* Now create the rb hammer! */
7569 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
7570 if (WARN_ON(IS_ERR(rb_hammer))) {
7571 pr_cont("FAILED\n");
7572 ret = PTR_ERR(rb_hammer);
7573 goto out_free;
7574 }
7575
7576 ring_buffer_record_on(buffer);
7577 /*
7578 * Show buffer is enabled before setting rb_test_started.
7579 * Yes there's a small race window where events could be
7580 	 * dropped and the thread won't catch it. But when a ring
7581 * buffer gets enabled, there will always be some kind of
7582 * delay before other CPUs see it. Thus, we don't care about
7583 * those dropped events. We care about events dropped after
7584 * the threads see that the buffer is active.
7585 */
7586 smp_wmb();
7587 rb_test_started = true;
7588
7589 set_current_state(TASK_INTERRUPTIBLE);
7590 	/* Just run for 10 seconds */
7591 schedule_timeout(10 * HZ);
7592
7593 kthread_stop(rb_hammer);
7594
7595 out_free:
7596 for_each_online_cpu(cpu) {
7597 if (!rb_threads[cpu])
7598 break;
7599 kthread_stop(rb_threads[cpu]);
7600 }
7601 if (ret) {
7602 ring_buffer_free(buffer);
7603 return ret;
7604 }
7605
7606 /* Report! */
7607 pr_info("finished\n");
7608 for_each_online_cpu(cpu) {
7609 struct ring_buffer_event *event;
7610 struct rb_test_data *data = &rb_data[cpu];
7611 struct rb_item *item;
7612 unsigned long total_events;
7613 unsigned long total_dropped;
7614 unsigned long total_written;
7615 unsigned long total_alloc;
7616 unsigned long total_read = 0;
7617 unsigned long total_size = 0;
7618 unsigned long total_len = 0;
7619 unsigned long total_lost = 0;
7620 unsigned long lost;
7621 int big_event_size;
7622 int small_event_size;
7623
7624 ret = -1;
7625
7626 total_events = data->events + data->events_nested;
7627 total_written = data->bytes_written + data->bytes_written_nested;
7628 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
7629 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
7630
7631 big_event_size = data->max_size + data->max_size_nested;
7632 small_event_size = data->min_size + data->min_size_nested;
7633
7634 pr_info("CPU %d:\n", cpu);
7635 pr_info(" events: %ld\n", total_events);
7636 pr_info(" dropped bytes: %ld\n", total_dropped);
7637 pr_info(" alloced bytes: %ld\n", total_alloc);
7638 pr_info(" written bytes: %ld\n", total_written);
7639 pr_info(" biggest event: %d\n", big_event_size);
7640 pr_info(" smallest event: %d\n", small_event_size);
7641
7642 if (RB_WARN_ON(buffer, total_dropped))
7643 break;
7644
7645 ret = 0;
7646
7647 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
7648 total_lost += lost;
7649 item = ring_buffer_event_data(event);
7650 total_len += ring_buffer_event_length(event);
7651 total_size += item->size + sizeof(struct rb_item);
7652 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
7653 pr_info("FAILED!\n");
7654 pr_info("buffer had: %.*s\n", item->size, item->str);
7655 pr_info("expected: %.*s\n", item->size, rb_string);
7656 RB_WARN_ON(buffer, 1);
7657 ret = -1;
7658 break;
7659 }
7660 total_read++;
7661 }
7662 if (ret)
7663 break;
7664
7665 ret = -1;
7666
7667 pr_info(" read events: %ld\n", total_read);
7668 pr_info(" lost events: %ld\n", total_lost);
7669 pr_info(" total events: %ld\n", total_lost + total_read);
7670 pr_info(" recorded len bytes: %ld\n", total_len);
7671 pr_info(" recorded size bytes: %ld\n", total_size);
7672 if (total_lost) {
7673 pr_info(" With dropped events, record len and size may not match\n"
7674 " alloced and written from above\n");
7675 } else {
7676 if (RB_WARN_ON(buffer, total_len != total_alloc ||
7677 total_size != total_written))
7678 break;
7679 }
7680 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
7681 break;
7682
7683 ret = 0;
7684 }
7685 if (!ret)
7686 pr_info("Ring buffer PASSED!\n");
7687
7688 ring_buffer_free(buffer);
7689 return 0;
7690 }
7691
7692 late_initcall(test_ringbuffer);
7693 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
7694