xref: /linux/kernel/trace/ring_buffer.c (revision 564f7dfde24a405d877168f150ae5d29d3ad99c7)
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/trace_events.h>
7 #include <linux/ring_buffer.h>
8 #include <linux/trace_clock.h>
9 #include <linux/sched/clock.h>
10 #include <linux/trace_seq.h>
11 #include <linux/spinlock.h>
12 #include <linux/irq_work.h>
13 #include <linux/uaccess.h>
14 #include <linux/hardirq.h>
15 #include <linux/kthread.h>	/* for self test */
16 #include <linux/kmemcheck.h>
17 #include <linux/module.h>
18 #include <linux/percpu.h>
19 #include <linux/mutex.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/hash.h>
24 #include <linux/list.h>
25 #include <linux/cpu.h>
26 
27 #include <asm/local.h>
28 
29 static void update_pages_handler(struct work_struct *work);
30 
31 /*
32  * The ring buffer header is special. We have to keep it up to date manually.
33  */
34 int ring_buffer_print_entry_header(struct trace_seq *s)
35 {
36 	trace_seq_puts(s, "# compressed entry header\n");
37 	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
38 	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
39 	trace_seq_puts(s, "\tarray       :   32 bits\n");
40 	trace_seq_putc(s, '\n');
41 	trace_seq_printf(s, "\tpadding     : type == %d\n",
42 			 RINGBUF_TYPE_PADDING);
43 	trace_seq_printf(s, "\ttime_extend : type == %d\n",
44 			 RINGBUF_TYPE_TIME_EXTEND);
45 	trace_seq_printf(s, "\tdata max type_len  == %d\n",
46 			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
47 
48 	return !trace_seq_has_overflowed(s);
49 }
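
/*
 * Illustrative note (not part of the original file): the header printed
 * above mirrors the bitfields of struct ring_buffer_event declared in
 * include/linux/ring_buffer.h, roughly:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 1..RINGBUF_TYPE_DATA_TYPE_LEN_MAX encodes the payload
 * length in RB_ALIGNMENT (4 byte) units; a type_len of 0 means the
 * payload length is stored in array[0] instead.
 */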
50 
51 /*
52  * The ring buffer is made up of a list of pages. A separate list of pages is
53  * allocated for each CPU. A writer may only write to a buffer that is
54  * associated with the CPU it is currently executing on.  A reader may read
55  * from any per cpu buffer.
56  *
57  * The reader is special. For each per cpu buffer, the reader has its own
58  * reader page. When a reader has read the entire reader page, this reader
59  * page is swapped with another page in the ring buffer.
60  *
61  * Now, as long as the writer is off the reader page, the reader can do
62  * whatever it wants with that page. The writer will never write to that page
63  * again (as long as it is out of the ring buffer).
64  *
65  * Here's some silly ASCII art.
66  *
67  *   +------+
68  *   |reader|          RING BUFFER
69  *   |page  |
70  *   +------+        +---+   +---+   +---+
71  *                   |   |-->|   |-->|   |
72  *                   +---+   +---+   +---+
73  *                     ^               |
74  *                     |               |
75  *                     +---------------+
76  *
77  *
78  *   +------+
79  *   |reader|          RING BUFFER
80  *   |page  |------------------v
81  *   +------+        +---+   +---+   +---+
82  *                   |   |-->|   |-->|   |
83  *                   +---+   +---+   +---+
84  *                     ^               |
85  *                     |               |
86  *                     +---------------+
87  *
88  *
89  *   +------+
90  *   |reader|          RING BUFFER
91  *   |page  |------------------v
92  *   +------+        +---+   +---+   +---+
93  *      ^            |   |-->|   |-->|   |
94  *      |            +---+   +---+   +---+
95  *      |                              |
96  *      |                              |
97  *      +------------------------------+
98  *
99  *
100  *   +------+
101  *   |buffer|          RING BUFFER
102  *   |page  |------------------v
103  *   +------+        +---+   +---+   +---+
104  *      ^            |   |   |   |-->|   |
105  *      |   New      +---+   +---+   +---+
106  *      |  Reader------^               |
107  *      |   page                       |
108  *      +------------------------------+
109  *
110  *
111  * After we make this swap, the reader can hand this page off to the splice
112  * code and be done with it. It can even allocate a new page if it needs to
113  * and swap that into the ring buffer.
114  *
115  * cmpxchg is used to make all of this lockless (see the notes ahead of rb_list_head()).
116  *
117  */
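
/*
 * Illustrative sketch (not part of the original file): a consumer built
 * on the public API, which internally relies on the reader-page swap
 * described above. process_record() is a hypothetical callback and
 * error handling is elided:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost_events;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events)))
 *		process_record(ring_buffer_event_data(event),
 *			       ring_buffer_event_length(event),
 *			       ts, lost_events);
 */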
118 
119 /* Used for individual buffers (after the counter) */
120 #define RB_BUFFER_OFF		(1 << 20)
121 
122 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
123 
124 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
125 #define RB_ALIGNMENT		4U
126 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
127 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
128 
129 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
130 # define RB_FORCE_8BYTE_ALIGNMENT	0
131 # define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
132 #else
133 # define RB_FORCE_8BYTE_ALIGNMENT	1
134 # define RB_ARCH_ALIGNMENT		8U
135 #endif
136 
137 #define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
138 
139 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
140 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
141 
142 enum {
143 	RB_LEN_TIME_EXTEND = 8,
144 	RB_LEN_TIME_STAMP = 16,
145 };
146 
147 #define skip_time_extend(event) \
148 	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
149 
150 static inline int rb_null_event(struct ring_buffer_event *event)
151 {
152 	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
153 }
154 
155 static void rb_event_set_padding(struct ring_buffer_event *event)
156 {
157 	/* padding has a NULL time_delta */
158 	event->type_len = RINGBUF_TYPE_PADDING;
159 	event->time_delta = 0;
160 }
161 
162 static unsigned
163 rb_event_data_length(struct ring_buffer_event *event)
164 {
165 	unsigned length;
166 
167 	if (event->type_len)
168 		length = event->type_len * RB_ALIGNMENT;
169 	else
170 		length = event->array[0];
171 	return length + RB_EVNT_HDR_SIZE;
172 }
173 
174 /*
175  * Return the length of the given event. Will return
176  * the length of the time extend if the event is a
177  * time extend.
178  */
179 static inline unsigned
180 rb_event_length(struct ring_buffer_event *event)
181 {
182 	switch (event->type_len) {
183 	case RINGBUF_TYPE_PADDING:
184 		if (rb_null_event(event))
185 			/* undefined */
186 			return -1;
187 		return  event->array[0] + RB_EVNT_HDR_SIZE;
188 
189 	case RINGBUF_TYPE_TIME_EXTEND:
190 		return RB_LEN_TIME_EXTEND;
191 
192 	case RINGBUF_TYPE_TIME_STAMP:
193 		return RB_LEN_TIME_STAMP;
194 
195 	case RINGBUF_TYPE_DATA:
196 		return rb_event_data_length(event);
197 	default:
198 		BUG();
199 	}
200 	/* not hit */
201 	return 0;
202 }
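
/*
 * Worked example: a data event with type_len == 3 carries
 * 3 * RB_ALIGNMENT == 12 bytes of payload after the 4-byte header, so
 * rb_event_length() returns 16. A data event with type_len == 0 keeps
 * its payload length in array[0], so rb_event_length() returns
 * array[0] + RB_EVNT_HDR_SIZE.
 */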
203 
204 /*
205  * Return total length of time extend and data,
206  *   or just the event length for all other events.
207  */
208 static inline unsigned
209 rb_event_ts_length(struct ring_buffer_event *event)
210 {
211 	unsigned len = 0;
212 
213 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
214 		/* time extends include the data event after it */
215 		len = RB_LEN_TIME_EXTEND;
216 		event = skip_time_extend(event);
217 	}
218 	return len + rb_event_length(event);
219 }
220 
221 /**
222  * ring_buffer_event_length - return the length of the event
223  * @event: the event to get the length of
224  *
225  * Returns the size of the data load of a data event.
226  * If the event is something other than a data event, it
227  * returns the size of the event itself. With the exception
228  * of a TIME EXTEND, where it still returns the size of the
229  * data load of the data event after it.
230  */
231 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
232 {
233 	unsigned length;
234 
235 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
236 		event = skip_time_extend(event);
237 
238 	length = rb_event_length(event);
239 	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
240 		return length;
241 	length -= RB_EVNT_HDR_SIZE;
242 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
243 		length -= sizeof(event->array[0]);
244 	return length;
245 }
246 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
247 
248 /* inline for ring buffer fast paths */
249 static __always_inline void *
250 rb_event_data(struct ring_buffer_event *event)
251 {
252 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
253 		event = skip_time_extend(event);
254 	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
255 	/* If length is in len field, then array[0] has the data */
256 	if (event->type_len)
257 		return (void *)&event->array[0];
258 	/* Otherwise length is in array[0] and array[1] has the data */
259 	return (void *)&event->array[1];
260 }
261 
262 /**
263  * ring_buffer_event_data - return the data of the event
264  * @event: the event to get the data from
265  */
266 void *ring_buffer_event_data(struct ring_buffer_event *event)
267 {
268 	return rb_event_data(event);
269 }
270 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
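
/*
 * Illustrative sketch (not part of the original file): given an event
 * returned by a read API such as ring_buffer_peek() or
 * ring_buffer_consume(), the payload and its size are typically
 * obtained together:
 *
 *	void *payload = ring_buffer_event_data(event);
 *	unsigned size = ring_buffer_event_length(event);
 *
 * Both helpers transparently skip a leading TIME_EXTEND, so callers
 * always see the data event.
 */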
271 
272 #define for_each_buffer_cpu(buffer, cpu)		\
273 	for_each_cpu(cpu, buffer->cpumask)
274 
275 #define TS_SHIFT	27
276 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
277 #define TS_DELTA_TEST	(~TS_MASK)
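
/*
 * Worked example: with TS_SHIFT == 27, a delta of up to 2^27 - 1
 * nanoseconds (~134ms) fits in an event's time_delta field. A larger
 * delta has bits set in TS_DELTA_TEST and forces a TIME_EXTEND event,
 * which carries the low TS_SHIFT bits in time_delta and the remaining
 * high bits in array[0].
 */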
278 
279 /* Flag when events were overwritten */
280 #define RB_MISSED_EVENTS	(1 << 31)
281 /* Missed count stored at end */
282 #define RB_MISSED_STORED	(1 << 30)
283 
284 struct buffer_data_page {
285 	u64		 time_stamp;	/* page time stamp */
286 	local_t		 commit;	/* write committed index */
287 	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
288 };
289 
290 /*
291  * Note, the buffer_page list must be first. The buffer pages
292  * are allocated in cache lines, which means that each buffer
293  * page will be at the beginning of a cache line, and thus
294  * the least significant bits will be zero. We use this to
295  * add flags in the list struct pointers, to make the ring buffer
296  * lockless.
297  */
298 struct buffer_page {
299 	struct list_head list;		/* list of buffer pages */
300 	local_t		 write;		/* index for next write */
301 	unsigned	 read;		/* index for next read */
302 	local_t		 entries;	/* entries on this page */
303 	unsigned long	 real_end;	/* real end of data */
304 	struct buffer_data_page *page;	/* Actual data page */
305 };
306 
307 /*
308  * The buffer page counters, write and entries, must be reset
309  * atomically when crossing page boundaries. To synchronize this
310  * update, two counters are packed into the same word. One is
311  * the actual counter for the write position or count on the page.
312  *
313  * The other is a count of updaters. Before an update happens,
314  * the updater portion of the word is incremented. This allows
315  * the updater to update the counter atomically.
316  *
317  * The write counter uses the low 20 bits; the updater count uses the upper 12.
318  */
319 #define RB_WRITE_MASK		0xfffff
320 #define RB_WRITE_INTCNT		(1 << 20)
321 
322 static void rb_init_page(struct buffer_data_page *bpage)
323 {
324 	local_set(&bpage->commit, 0);
325 }
326 
327 /**
328  * ring_buffer_page_len - the size of data on the page.
329  * @page: The page to read
330  *
331  * Returns the amount of data on the page, including buffer page header.
332  */
333 size_t ring_buffer_page_len(void *page)
334 {
335 	return local_read(&((struct buffer_data_page *)page)->commit)
336 		+ BUF_PAGE_HDR_SIZE;
337 }
338 
339 /*
340  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
341  * this issue out.
342  */
343 static void free_buffer_page(struct buffer_page *bpage)
344 {
345 	free_page((unsigned long)bpage->page);
346 	kfree(bpage);
347 }
348 
349 /*
350  * We need to fit the time_stamp delta into 27 bits.
351  */
352 static inline int test_time_stamp(u64 delta)
353 {
354 	if (delta & TS_DELTA_TEST)
355 		return 1;
356 	return 0;
357 }
358 
359 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
360 
361 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
362 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
363 
364 int ring_buffer_print_page_header(struct trace_seq *s)
365 {
366 	struct buffer_data_page field;
367 
368 	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
369 			 "offset:0;\tsize:%u;\tsigned:%u;\n",
370 			 (unsigned int)sizeof(field.time_stamp),
371 			 (unsigned int)is_signed_type(u64));
372 
373 	trace_seq_printf(s, "\tfield: local_t commit;\t"
374 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
375 			 (unsigned int)offsetof(typeof(field), commit),
376 			 (unsigned int)sizeof(field.commit),
377 			 (unsigned int)is_signed_type(long));
378 
379 	trace_seq_printf(s, "\tfield: int overwrite;\t"
380 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
381 			 (unsigned int)offsetof(typeof(field), commit),
382 			 1,
383 			 (unsigned int)is_signed_type(long));
384 
385 	trace_seq_printf(s, "\tfield: char data;\t"
386 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
387 			 (unsigned int)offsetof(typeof(field), data),
388 			 (unsigned int)BUF_PAGE_SIZE,
389 			 (unsigned int)is_signed_type(char));
390 
391 	return !trace_seq_has_overflowed(s);
392 }
393 
394 struct rb_irq_work {
395 	struct irq_work			work;
396 	wait_queue_head_t		waiters;
397 	wait_queue_head_t		full_waiters;
398 	bool				waiters_pending;
399 	bool				full_waiters_pending;
400 	bool				wakeup_full;
401 };
402 
403 /*
404  * Structure to hold event state and handle nested events.
405  */
406 struct rb_event_info {
407 	u64			ts;
408 	u64			delta;
409 	unsigned long		length;
410 	struct buffer_page	*tail_page;
411 	int			add_timestamp;
412 };
413 
414 /*
415  * Used for which event context the event is in.
416  *  NMI     = 0
417  *  IRQ     = 1
418  *  SOFTIRQ = 2
419  *  NORMAL  = 3
420  *
421  * See trace_recursive_lock() comment below for more details.
422  */
423 enum {
424 	RB_CTX_NMI,
425 	RB_CTX_IRQ,
426 	RB_CTX_SOFTIRQ,
427 	RB_CTX_NORMAL,
428 	RB_CTX_MAX
429 };
430 
431 /*
432  * head_page == tail_page && head == tail then buffer is empty.
433  */
434 struct ring_buffer_per_cpu {
435 	int				cpu;
436 	atomic_t			record_disabled;
437 	struct ring_buffer		*buffer;
438 	raw_spinlock_t			reader_lock;	/* serialize readers */
439 	arch_spinlock_t			lock;
440 	struct lock_class_key		lock_key;
441 	unsigned long			nr_pages;
442 	unsigned int			current_context;
443 	struct list_head		*pages;
444 	struct buffer_page		*head_page;	/* read from head */
445 	struct buffer_page		*tail_page;	/* write to tail */
446 	struct buffer_page		*commit_page;	/* committed pages */
447 	struct buffer_page		*reader_page;
448 	unsigned long			lost_events;
449 	unsigned long			last_overrun;
450 	local_t				entries_bytes;
451 	local_t				entries;
452 	local_t				overrun;
453 	local_t				commit_overrun;
454 	local_t				dropped_events;
455 	local_t				committing;
456 	local_t				commits;
457 	unsigned long			read;
458 	unsigned long			read_bytes;
459 	u64				write_stamp;
460 	u64				read_stamp;
461 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
462 	long				nr_pages_to_update;
463 	struct list_head		new_pages; /* new pages to add */
464 	struct work_struct		update_pages_work;
465 	struct completion		update_done;
466 
467 	struct rb_irq_work		irq_work;
468 };
469 
470 struct ring_buffer {
471 	unsigned			flags;
472 	int				cpus;
473 	atomic_t			record_disabled;
474 	atomic_t			resize_disabled;
475 	cpumask_var_t			cpumask;
476 
477 	struct lock_class_key		*reader_lock_key;
478 
479 	struct mutex			mutex;
480 
481 	struct ring_buffer_per_cpu	**buffers;
482 
483 	struct hlist_node		node;
484 	u64				(*clock)(void);
485 
486 	struct rb_irq_work		irq_work;
487 };
488 
489 struct ring_buffer_iter {
490 	struct ring_buffer_per_cpu	*cpu_buffer;
491 	unsigned long			head;
492 	struct buffer_page		*head_page;
493 	struct buffer_page		*cache_reader_page;
494 	unsigned long			cache_read;
495 	u64				read_stamp;
496 };
497 
498 /*
499  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
500  *
501  * Runs from irq_work context to wake up any task that is blocked on
502  * the ring buffer waiters queue.
503  */
504 static void rb_wake_up_waiters(struct irq_work *work)
505 {
506 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
507 
508 	wake_up_all(&rbwork->waiters);
509 	if (rbwork->wakeup_full) {
510 		rbwork->wakeup_full = false;
511 		wake_up_all(&rbwork->full_waiters);
512 	}
513 }
514 
515 /**
516  * ring_buffer_wait - wait for input to the ring buffer
517  * @buffer: buffer to wait on
518  * @cpu: the cpu buffer to wait on
519  * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
520  *
521  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
522  * as data is added to any of the @buffer's cpu buffers. Otherwise
523  * it will wait for data to be added to a specific cpu buffer.
524  */
525 int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
526 {
527 	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
528 	DEFINE_WAIT(wait);
529 	struct rb_irq_work *work;
530 	int ret = 0;
531 
532 	/*
533 	 * Depending on what the caller is waiting for, either any
534 	 * data in any cpu buffer, or a specific buffer, put the
535 	 * caller on the appropriate wait queue.
536 	 */
537 	if (cpu == RING_BUFFER_ALL_CPUS) {
538 		work = &buffer->irq_work;
539 		/* Full only makes sense on per cpu reads */
540 		full = false;
541 	} else {
542 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
543 			return -ENODEV;
544 		cpu_buffer = buffer->buffers[cpu];
545 		work = &cpu_buffer->irq_work;
546 	}
547 
548 
549 	while (true) {
550 		if (full)
551 			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
552 		else
553 			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
554 
555 		/*
556 		 * The events can happen in critical sections where
557 		 * checking a work queue can cause deadlocks.
558 		 * After adding a task to the queue, this flag is set
559 		 * only to notify events to try to wake up the queue
560 		 * using irq_work.
561 		 *
562 		 * We don't clear it even if the buffer is no longer
563 		 * empty. The flag only causes the next event to run
564 		 * irq_work to do the work queue wake up. The worst
565 		 * that can happen if we race with !trace_empty() is that
566 		 * an event will cause an irq_work to try to wake up
567 		 * an empty queue.
568 		 *
569 		 * There's no reason to protect this flag either, as
570 		 * the work queue and irq_work logic will do the necessary
571 		 * synchronization for the wake ups. The only thing
572 		 * that is necessary is that the wake up happens after
573 		 * a task has been queued. Spurious wake ups are OK.
574 		 */
575 		if (full)
576 			work->full_waiters_pending = true;
577 		else
578 			work->waiters_pending = true;
579 
580 		if (signal_pending(current)) {
581 			ret = -EINTR;
582 			break;
583 		}
584 
585 		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
586 			break;
587 
588 		if (cpu != RING_BUFFER_ALL_CPUS &&
589 		    !ring_buffer_empty_cpu(buffer, cpu)) {
590 			unsigned long flags;
591 			bool pagebusy;
592 
593 			if (!full)
594 				break;
595 
596 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
597 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
598 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
599 
600 			if (!pagebusy)
601 				break;
602 		}
603 
604 		schedule();
605 	}
606 
607 	if (full)
608 		finish_wait(&work->full_waiters, &wait);
609 	else
610 		finish_wait(&work->waiters, &wait);
611 
612 	return ret;
613 }
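
/*
 * Illustrative sketch (not part of the original file): a reader thread
 * blocking until data shows up on one cpu buffer. ring_buffer_wait()
 * returns 0 once data is available, -EINTR if a signal arrived, or
 * -ENODEV for an invalid cpu. consume_event() is a hypothetical helper:
 *
 *	int ret = ring_buffer_wait(buffer, cpu, false);
 *
 *	if (ret)
 *		return ret;
 *	consume_event(buffer, cpu);
 */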
614 
615 /**
616  * ring_buffer_poll_wait - poll on buffer input
617  * @buffer: buffer to wait on
618  * @cpu: the cpu buffer to wait on
619  * @filp: the file descriptor
620  * @poll_table: The poll descriptor
621  *
622  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
623  * as data is added to any of the @buffer's cpu buffers. Otherwise
624  * it will wait for data to be added to a specific cpu buffer.
625  *
626  * Returns POLLIN | POLLRDNORM if data exists in the buffers,
627  * zero otherwise.
628  */
629 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
630 			  struct file *filp, poll_table *poll_table)
631 {
632 	struct ring_buffer_per_cpu *cpu_buffer;
633 	struct rb_irq_work *work;
634 
635 	if (cpu == RING_BUFFER_ALL_CPUS)
636 		work = &buffer->irq_work;
637 	else {
638 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
639 			return -EINVAL;
640 
641 		cpu_buffer = buffer->buffers[cpu];
642 		work = &cpu_buffer->irq_work;
643 	}
644 
645 	poll_wait(filp, &work->waiters, poll_table);
646 	work->waiters_pending = true;
647 	/*
648 	 * There's a tight race between setting the waiters_pending and
649 	 * checking if the ring buffer is empty.  Once the waiters_pending bit
650 	 * is set, the next event will wake the task up, but we can get stuck
651 	 * if there's only a single event in.
652 	 *
653 	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
654 	 * but adding a memory barrier to all events will cause too much of a
655 	 * performance hit in the fast path.  We only need a memory barrier when
656 	 * the buffer goes from empty to having content.  But as this race is
657 	 * extremely small, and it's not a problem if another event comes in, we
658 	 * will fix it later.
659 	 */
660 	smp_mb();
661 
662 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
663 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
664 		return POLLIN | POLLRDNORM;
665 	return 0;
666 }
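
/*
 * Illustrative sketch (not part of the original file): a
 * file_operations ->poll handler layered on this helper, loosely
 * modeled on kernel/trace/trace.c (names here are hypothetical):
 *
 *	static unsigned int my_trace_poll(struct file *filp,
 *					  poll_table *pt)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, my_cpu, filp, pt);
 *	}
 */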
667 
668 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
669 #define RB_WARN_ON(b, cond)						\
670 	({								\
671 		int _____ret = unlikely(cond);				\
672 		if (_____ret) {						\
673 			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
674 				struct ring_buffer_per_cpu *__b =	\
675 					(void *)b;			\
676 				atomic_inc(&__b->buffer->record_disabled); \
677 			} else						\
678 				atomic_inc(&b->record_disabled);	\
679 			WARN_ON(1);					\
680 		}							\
681 		_____ret;						\
682 	})
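
/*
 * Usage example: RB_WARN_ON() accepts either a struct ring_buffer or a
 * struct ring_buffer_per_cpu. On a failed check it disables recording
 * so that a corrupted buffer is not written to again, e.g. as in
 * rb_check_pages() below:
 *
 *	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 *		return -1;
 */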
683 
684 /* Up this if you want to test the TIME_EXTENTS and normalization */
685 #define DEBUG_SHIFT 0
686 
687 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
688 {
689 	/* shift to debug/test normalization and TIME_EXTENTS */
690 	return buffer->clock() << DEBUG_SHIFT;
691 }
692 
693 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
694 {
695 	u64 time;
696 
697 	preempt_disable_notrace();
698 	time = rb_time_stamp(buffer);
699 	preempt_enable_no_resched_notrace();
700 
701 	return time;
702 }
703 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
704 
705 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
706 				      int cpu, u64 *ts)
707 {
708 	/* Just stupid testing the normalize function and deltas */
709 	*ts >>= DEBUG_SHIFT;
710 }
711 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
712 
713 /*
714  * Making the ring buffer lockless makes things tricky.
715  * Writes only happen on the CPU that they are on, and they
716  * only need to worry about interrupts. Reads, however, can
717  * happen on any CPU.
718  *
719  * The reader page is always off the ring buffer, but when the
720  * reader finishes with a page, it needs to swap its page with
721  * a new one from the buffer. The reader needs to take from
722  * the head (writes go to the tail). But if a writer is in overwrite
723  * mode and wraps, it must push the head page forward.
724  *
725  * Here lies the problem.
726  *
727  * The reader must be careful to replace only the head page, and
728  * not another one. As described at the top of the file in the
729  * ASCII art, the reader sets its old page to point to the next
730  * page after head. It then sets the page after head to point to
731  * the old reader page. But if the writer moves the head page
732  * during this operation, the reader could end up with the tail.
733  *
734  * We use cmpxchg to help prevent this race. We also do something
735  * special with the page before head. We set the LSB to 1.
736  *
737  * When the writer must push the page forward, it will clear the
738  * bit that points to the head page, move the head, and then set
739  * the bit that points to the new head page.
740  *
741  * We also don't want an interrupt coming in and moving the head
742  * page on another writer. Thus we use the second LSB to catch
743  * that too. Thus:
744  *
745  * head->list->prev->next        bit 1          bit 0
746  *                              -------        -------
747  * Normal page                     0              0
748  * Points to head page             0              1
749  * New head page                   1              0
750  *
751  * Note we can not trust the prev pointer of the head page, because:
752  *
753  * +----+       +-----+        +-----+
754  * |    |------>|  T  |---X--->|  N  |
755  * |    |<------|     |        |     |
756  * +----+       +-----+        +-----+
757  *   ^                           ^ |
758  *   |          +-----+          | |
759  *   +----------|  R  |----------+ |
760  *              |     |<-----------+
761  *              +-----+
762  *
763  * Key:  ---X-->  HEAD flag set in pointer
764  *         T      Tail page
765  *         R      Reader page
766  *         N      Next page
767  *
768  * (see __rb_reserve_next() to see where this happens)
769  *
770  *  What the above shows is that the reader just swapped out
771  *  the reader page with a page in the buffer, but before it
772  *  could make the new header point back to the new page added
773  *  it was preempted by a writer. The writer moved forward onto
774  *  the new page added by the reader and is about to move forward
775  *  again.
776  *
777  *  As you can see, it is legitimate for the previous pointer of
778  *  the head (or any page) not to point back to itself, but only
779  *  temporarily.
780  */
781 
782 #define RB_PAGE_NORMAL		0UL
783 #define RB_PAGE_HEAD		1UL
784 #define RB_PAGE_UPDATE		2UL
785 
786 
787 #define RB_FLAG_MASK		3UL
788 
789 /* PAGE_MOVED is not part of the mask */
790 #define RB_PAGE_MOVED		4UL
791 
792 /*
793  * rb_list_head - remove any bit
794  */
795 static struct list_head *rb_list_head(struct list_head *list)
796 {
797 	unsigned long val = (unsigned long)list;
798 
799 	return (struct list_head *)(val & ~RB_FLAG_MASK);
800 }
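
/*
 * Worked example: buffer pages are allocated cache-line aligned, so
 * the two low bits of a list pointer are free for flags. A pointer to
 * the head page is stored as (addr | RB_PAGE_HEAD), and:
 *
 *	struct list_head *next = rb_list_head(page->list.next);
 *	unsigned long flags =
 *		(unsigned long)page->list.next & RB_FLAG_MASK;
 *
 * recovers the clean pointer and the flag bits separately.
 */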
801 
802 /*
803  * rb_is_head_page - test if the given page is the head page
804  *
805  * Because the reader may move the head_page pointer, we cannot
806  * trust what the head page is (it may be pointing to
807  * the reader page). But if the next page is a header page,
808  * its flags will be non-zero.
809  */
810 static inline int
811 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
812 		struct buffer_page *page, struct list_head *list)
813 {
814 	unsigned long val;
815 
816 	val = (unsigned long)list->next;
817 
818 	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
819 		return RB_PAGE_MOVED;
820 
821 	return val & RB_FLAG_MASK;
822 }
823 
824 /*
825  * rb_is_reader_page
826  *
827  * The unique thing about the reader page is that, if the
828  * writer is ever on it, the previous pointer never points
829  * back to the reader page.
830  */
831 static bool rb_is_reader_page(struct buffer_page *page)
832 {
833 	struct list_head *list = page->list.prev;
834 
835 	return rb_list_head(list->next) != &page->list;
836 }
837 
838 /*
839  * rb_set_list_to_head - set a list_head to point to the head page.
840  */
841 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
842 				struct list_head *list)
843 {
844 	unsigned long *ptr;
845 
846 	ptr = (unsigned long *)&list->next;
847 	*ptr |= RB_PAGE_HEAD;
848 	*ptr &= ~RB_PAGE_UPDATE;
849 }
850 
851 /*
852  * rb_head_page_activate - sets up head page
853  */
854 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
855 {
856 	struct buffer_page *head;
857 
858 	head = cpu_buffer->head_page;
859 	if (!head)
860 		return;
861 
862 	/*
863 	 * Set the previous list pointer to have the HEAD flag.
864 	 */
865 	rb_set_list_to_head(cpu_buffer, head->list.prev);
866 }
867 
868 static void rb_list_head_clear(struct list_head *list)
869 {
870 	unsigned long *ptr = (unsigned long *)&list->next;
871 
872 	*ptr &= ~RB_FLAG_MASK;
873 }
874 
875 /*
876  * rb_head_page_deactivate - clears head page ptr (for free list)
877  */
878 static void
879 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
880 {
881 	struct list_head *hd;
882 
883 	/* Go through the whole list and clear any pointers found. */
884 	rb_list_head_clear(cpu_buffer->pages);
885 
886 	list_for_each(hd, cpu_buffer->pages)
887 		rb_list_head_clear(hd);
888 }
889 
890 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
891 			    struct buffer_page *head,
892 			    struct buffer_page *prev,
893 			    int old_flag, int new_flag)
894 {
895 	struct list_head *list;
896 	unsigned long val = (unsigned long)&head->list;
897 	unsigned long ret;
898 
899 	list = &prev->list;
900 
901 	val &= ~RB_FLAG_MASK;
902 
903 	ret = cmpxchg((unsigned long *)&list->next,
904 		      val | old_flag, val | new_flag);
905 
906 	/* check if the reader took the page */
907 	if ((ret & ~RB_FLAG_MASK) != val)
908 		return RB_PAGE_MOVED;
909 
910 	return ret & RB_FLAG_MASK;
911 }
912 
913 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
914 				   struct buffer_page *head,
915 				   struct buffer_page *prev,
916 				   int old_flag)
917 {
918 	return rb_head_page_set(cpu_buffer, head, prev,
919 				old_flag, RB_PAGE_UPDATE);
920 }
921 
922 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
923 				 struct buffer_page *head,
924 				 struct buffer_page *prev,
925 				 int old_flag)
926 {
927 	return rb_head_page_set(cpu_buffer, head, prev,
928 				old_flag, RB_PAGE_HEAD);
929 }
930 
931 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
932 				   struct buffer_page *head,
933 				   struct buffer_page *prev,
934 				   int old_flag)
935 {
936 	return rb_head_page_set(cpu_buffer, head, prev,
937 				old_flag, RB_PAGE_NORMAL);
938 }
939 
940 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
941 			       struct buffer_page **bpage)
942 {
943 	struct list_head *p = rb_list_head((*bpage)->list.next);
944 
945 	*bpage = list_entry(p, struct buffer_page, list);
946 }
947 
948 static struct buffer_page *
949 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
950 {
951 	struct buffer_page *head;
952 	struct buffer_page *page;
953 	struct list_head *list;
954 	int i;
955 
956 	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
957 		return NULL;
958 
959 	/* sanity check */
960 	list = cpu_buffer->pages;
961 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
962 		return NULL;
963 
964 	page = head = cpu_buffer->head_page;
965 	/*
966 	 * It is possible that the writer moves the header behind
967 	 * where we started, and we miss it in one loop.
968 	 * A second loop should grab the header, but we'll do
969 	 * three loops just because I'm paranoid.
970 	 */
971 	for (i = 0; i < 3; i++) {
972 		do {
973 			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
974 				cpu_buffer->head_page = page;
975 				return page;
976 			}
977 			rb_inc_page(cpu_buffer, &page);
978 		} while (page != head);
979 	}
980 
981 	RB_WARN_ON(cpu_buffer, 1);
982 
983 	return NULL;
984 }
985 
986 static int rb_head_page_replace(struct buffer_page *old,
987 				struct buffer_page *new)
988 {
989 	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
990 	unsigned long val;
991 	unsigned long ret;
992 
993 	val = *ptr & ~RB_FLAG_MASK;
994 	val |= RB_PAGE_HEAD;
995 
996 	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
997 
998 	return ret == val;
999 }
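
/*
 * Illustrative sketch: rb_get_reader_page(), later in this file, uses
 * this to swap its spare page in for the current head page:
 *
 *	if (rb_head_page_replace(head_page, reader_page))
 *		... reader now owns the old head page ...
 *
 * The cmpxchg() succeeds only while the pointer still carries the
 * RB_PAGE_HEAD flag, so a writer concurrently pushing the head forward
 * makes the reader retry.
 */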
1000 
1001 /*
1002  * rb_tail_page_update - move the tail page forward
1003  */
1004 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1005 			       struct buffer_page *tail_page,
1006 			       struct buffer_page *next_page)
1007 {
1008 	unsigned long old_entries;
1009 	unsigned long old_write;
1010 
1011 	/*
1012 	 * The tail page now needs to be moved forward.
1013 	 *
1014 	 * We need to reset the tail page, but without erasing
1015 	 * data brought in by interrupts that have already moved
1016 	 * the tail page and are currently writing to it.
1017 	 *
1018 	 * We add a counter to the write field to denote this.
1019 	 */
1020 	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1021 	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1022 
1023 	/*
1024 	 * Just make sure we have seen our old_write and synchronize
1025 	 * with any interrupts that come in.
1026 	 */
1027 	barrier();
1028 
1029 	/*
1030 	 * If the tail page is still the same as what we think
1031 	 * it is, then it is up to us to update the tail
1032 	 * pointer.
1033 	 */
1034 	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1035 		/* Zero the write counter */
1036 		unsigned long val = old_write & ~RB_WRITE_MASK;
1037 		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1038 
1039 		/*
1040 		 * This will only succeed if an interrupt did
1041 		 * not come in and change it, in which case we
1042 		 * do not want to modify it.
1043 		 *
1044 		 * We add (void) to let the compiler know that we do not care
1045 		 * about the return value of these functions. We use the
1046 		 * cmpxchg to only update if an interrupt did not already
1047 		 * do it for us. If the cmpxchg fails, we don't care.
1048 		 */
1049 		(void)local_cmpxchg(&next_page->write, old_write, val);
1050 		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1051 
1052 		/*
1053 		 * No need to worry about races with clearing out the commit;
1054 		 * it can only increment when a commit takes place. But that
1055 		 * only happens in the outer most nested commit.
1056 		 */
1057 		local_set(&next_page->page->commit, 0);
1058 
1059 		/* Again, either we update tail_page or an interrupt does */
1060 		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1061 	}
1062 }
1063 
1064 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1065 			  struct buffer_page *bpage)
1066 {
1067 	unsigned long val = (unsigned long)bpage;
1068 
1069 	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1070 		return 1;
1071 
1072 	return 0;
1073 }
1074 
1075 /**
1076  * rb_check_list - make sure a pointer to a list has the last bits zero
1077  */
1078 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1079 			 struct list_head *list)
1080 {
1081 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1082 		return 1;
1083 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1084 		return 1;
1085 	return 0;
1086 }
1087 
1088 /**
1089  * rb_check_pages - integrity check of buffer pages
1090  * @cpu_buffer: CPU buffer with pages to test
1091  *
1092  * As a safety measure we check to make sure the data pages have not
1093  * been corrupted.
1094  */
1095 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1096 {
1097 	struct list_head *head = cpu_buffer->pages;
1098 	struct buffer_page *bpage, *tmp;
1099 
1100 	/* Reset the head page if it exists */
1101 	if (cpu_buffer->head_page)
1102 		rb_set_head_page(cpu_buffer);
1103 
1104 	rb_head_page_deactivate(cpu_buffer);
1105 
1106 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1107 		return -1;
1108 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1109 		return -1;
1110 
1111 	if (rb_check_list(cpu_buffer, head))
1112 		return -1;
1113 
1114 	list_for_each_entry_safe(bpage, tmp, head, list) {
1115 		if (RB_WARN_ON(cpu_buffer,
1116 			       bpage->list.next->prev != &bpage->list))
1117 			return -1;
1118 		if (RB_WARN_ON(cpu_buffer,
1119 			       bpage->list.prev->next != &bpage->list))
1120 			return -1;
1121 		if (rb_check_list(cpu_buffer, &bpage->list))
1122 			return -1;
1123 	}
1124 
1125 	rb_head_page_activate(cpu_buffer);
1126 
1127 	return 0;
1128 }
1129 
1130 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1131 {
1132 	struct buffer_page *bpage, *tmp;
1133 	long i;
1134 
1135 	for (i = 0; i < nr_pages; i++) {
1136 		struct page *page;
1137 		/*
1138 		 * The __GFP_NORETRY flag makes sure that the allocation fails
1139 		 * gracefully without invoking the OOM killer, so the system
1140 		 * is not destabilized.
1141 		 */
1142 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1143 				    GFP_KERNEL | __GFP_NORETRY,
1144 				    cpu_to_node(cpu));
1145 		if (!bpage)
1146 			goto free_pages;
1147 
1148 		list_add(&bpage->list, pages);
1149 
1150 		page = alloc_pages_node(cpu_to_node(cpu),
1151 					GFP_KERNEL | __GFP_NORETRY, 0);
1152 		if (!page)
1153 			goto free_pages;
1154 		bpage->page = page_address(page);
1155 		rb_init_page(bpage->page);
1156 	}
1157 
1158 	return 0;
1159 
1160 free_pages:
1161 	list_for_each_entry_safe(bpage, tmp, pages, list) {
1162 		list_del_init(&bpage->list);
1163 		free_buffer_page(bpage);
1164 	}
1165 
1166 	return -ENOMEM;
1167 }
1168 
1169 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1170 			     unsigned long nr_pages)
1171 {
1172 	LIST_HEAD(pages);
1173 
1174 	WARN_ON(!nr_pages);
1175 
1176 	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1177 		return -ENOMEM;
1178 
1179 	/*
1180 	 * The ring buffer page list is a circular list that does not
1181 	 * start and end with a list head. All page list items point to
1182 	 * other pages.
1183 	 */
1184 	cpu_buffer->pages = pages.next;
1185 	list_del(&pages);
1186 
1187 	cpu_buffer->nr_pages = nr_pages;
1188 
1189 	rb_check_pages(cpu_buffer);
1190 
1191 	return 0;
1192 }
1193 
1194 static struct ring_buffer_per_cpu *
1195 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1196 {
1197 	struct ring_buffer_per_cpu *cpu_buffer;
1198 	struct buffer_page *bpage;
1199 	struct page *page;
1200 	int ret;
1201 
1202 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1203 				  GFP_KERNEL, cpu_to_node(cpu));
1204 	if (!cpu_buffer)
1205 		return NULL;
1206 
1207 	cpu_buffer->cpu = cpu;
1208 	cpu_buffer->buffer = buffer;
1209 	raw_spin_lock_init(&cpu_buffer->reader_lock);
1210 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1211 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1212 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1213 	init_completion(&cpu_buffer->update_done);
1214 	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1215 	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1216 	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1217 
1218 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1219 			    GFP_KERNEL, cpu_to_node(cpu));
1220 	if (!bpage)
1221 		goto fail_free_buffer;
1222 
1223 	rb_check_bpage(cpu_buffer, bpage);
1224 
1225 	cpu_buffer->reader_page = bpage;
1226 	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1227 	if (!page)
1228 		goto fail_free_reader;
1229 	bpage->page = page_address(page);
1230 	rb_init_page(bpage->page);
1231 
1232 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1233 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1234 
1235 	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1236 	if (ret < 0)
1237 		goto fail_free_reader;
1238 
1239 	cpu_buffer->head_page
1240 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1241 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1242 
1243 	rb_head_page_activate(cpu_buffer);
1244 
1245 	return cpu_buffer;
1246 
1247  fail_free_reader:
1248 	free_buffer_page(cpu_buffer->reader_page);
1249 
1250  fail_free_buffer:
1251 	kfree(cpu_buffer);
1252 	return NULL;
1253 }
1254 
1255 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1256 {
1257 	struct list_head *head = cpu_buffer->pages;
1258 	struct buffer_page *bpage, *tmp;
1259 
1260 	free_buffer_page(cpu_buffer->reader_page);
1261 
1262 	rb_head_page_deactivate(cpu_buffer);
1263 
1264 	if (head) {
1265 		list_for_each_entry_safe(bpage, tmp, head, list) {
1266 			list_del_init(&bpage->list);
1267 			free_buffer_page(bpage);
1268 		}
1269 		bpage = list_entry(head, struct buffer_page, list);
1270 		free_buffer_page(bpage);
1271 	}
1272 
1273 	kfree(cpu_buffer);
1274 }
1275 
1276 /**
1277  * __ring_buffer_alloc - allocate a new ring_buffer
1278  * @size: the size in bytes per cpu that is needed.
1279  * @flags: attributes to set for the ring buffer.
1280  *
1281  * Currently the only flag that is available is the RB_FL_OVERWRITE
1282  * flag. This flag means that the buffer will overwrite old data
1283  * when the buffer wraps. If this flag is not set, the buffer will
1284  * drop data when the tail hits the head.
1285  */
1286 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1287 					struct lock_class_key *key)
1288 {
1289 	struct ring_buffer *buffer;
1290 	long nr_pages;
1291 	int bsize;
1292 	int cpu;
1293 	int ret;
1294 
1295 	/* keep it in its own cache line */
1296 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1297 			 GFP_KERNEL);
1298 	if (!buffer)
1299 		return NULL;
1300 
1301 	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1302 		goto fail_free_buffer;
1303 
1304 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1305 	buffer->flags = flags;
1306 	buffer->clock = trace_clock_local;
1307 	buffer->reader_lock_key = key;
1308 
1309 	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1310 	init_waitqueue_head(&buffer->irq_work.waiters);
1311 
1312 	/* need at least two pages */
1313 	if (nr_pages < 2)
1314 		nr_pages = 2;
1315 
1316 	buffer->cpus = nr_cpu_ids;
1317 
1318 	bsize = sizeof(void *) * nr_cpu_ids;
1319 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1320 				  GFP_KERNEL);
1321 	if (!buffer->buffers)
1322 		goto fail_free_cpumask;
1323 
1324 	cpu = raw_smp_processor_id();
1325 	cpumask_set_cpu(cpu, buffer->cpumask);
1326 	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1327 	if (!buffer->buffers[cpu])
1328 		goto fail_free_buffers;
1329 
1330 	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1331 	if (ret < 0)
1332 		goto fail_free_buffers;
1333 
1334 	mutex_init(&buffer->mutex);
1335 
1336 	return buffer;
1337 
1338  fail_free_buffers:
1339 	for_each_buffer_cpu(buffer, cpu) {
1340 		if (buffer->buffers[cpu])
1341 			rb_free_cpu_buffer(buffer->buffers[cpu]);
1342 	}
1343 	kfree(buffer->buffers);
1344 
1345  fail_free_cpumask:
1346 	free_cpumask_var(buffer->cpumask);
1347 
1348  fail_free_buffer:
1349 	kfree(buffer);
1350 	return NULL;
1351 }
1352 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
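
/*
 * Illustrative sketch (not part of the original file): allocating and
 * freeing a buffer of at least 1MB per cpu. The ring_buffer_alloc()
 * wrapper in include/linux/ring_buffer.h supplies the lock class key:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */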
1353 
1354 /**
1355  * ring_buffer_free - free a ring buffer.
1356  * @buffer: the buffer to free.
1357  */
1358 void
1359 ring_buffer_free(struct ring_buffer *buffer)
1360 {
1361 	int cpu;
1362 
1363 	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1364 
1365 	for_each_buffer_cpu(buffer, cpu)
1366 		rb_free_cpu_buffer(buffer->buffers[cpu]);
1367 
1368 	kfree(buffer->buffers);
1369 	free_cpumask_var(buffer->cpumask);
1370 
1371 	kfree(buffer);
1372 }
1373 EXPORT_SYMBOL_GPL(ring_buffer_free);
1374 
1375 void ring_buffer_set_clock(struct ring_buffer *buffer,
1376 			   u64 (*clock)(void))
1377 {
1378 	buffer->clock = clock;
1379 }
1380 
1381 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1382 
1383 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1384 {
1385 	return local_read(&bpage->entries) & RB_WRITE_MASK;
1386 }
1387 
1388 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1389 {
1390 	return local_read(&bpage->write) & RB_WRITE_MASK;
1391 }
1392 
1393 static int
1394 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1395 {
1396 	struct list_head *tail_page, *to_remove, *next_page;
1397 	struct buffer_page *to_remove_page, *tmp_iter_page;
1398 	struct buffer_page *last_page, *first_page;
1399 	unsigned long nr_removed;
1400 	unsigned long head_bit;
1401 	int page_entries;
1402 
1403 	head_bit = 0;
1404 
1405 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1406 	atomic_inc(&cpu_buffer->record_disabled);
1407 	/*
1408 	 * We don't race with the readers since we have acquired the reader
1409 	 * lock. We also don't race with writers after disabling recording.
1410 	 * This makes it easy to figure out the first and the last page to be
1411 	 * removed from the list. We unlink all the pages in between including
1412 	 * the first and last pages. This is done in a busy loop so that we
1413 	 * lose the least number of traces.
1414 	 * The pages are freed after we restart recording and unlock readers.
1415 	 */
1416 	tail_page = &cpu_buffer->tail_page->list;
1417 
1418 	/*
1419 	 * The tail page might be on the reader page; in that case, remove
1420 	 * the next page from the ring buffer instead.
1421 	 */
1422 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1423 		tail_page = rb_list_head(tail_page->next);
1424 	to_remove = tail_page;
1425 
1426 	/* start of pages to remove */
1427 	first_page = list_entry(rb_list_head(to_remove->next),
1428 				struct buffer_page, list);
1429 
1430 	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1431 		to_remove = rb_list_head(to_remove)->next;
1432 		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1433 	}
1434 
1435 	next_page = rb_list_head(to_remove)->next;
1436 
1437 	/*
1438 	 * Now we remove all pages between tail_page and next_page.
1439 	 * Make sure that we have head_bit value preserved for the
1440 	 * next page
1441 	 */
1442 	tail_page->next = (struct list_head *)((unsigned long)next_page |
1443 						head_bit);
1444 	next_page = rb_list_head(next_page);
1445 	next_page->prev = tail_page;
1446 
1447 	/* make sure pages points to a valid page in the ring buffer */
1448 	cpu_buffer->pages = next_page;
1449 
1450 	/* update head page */
1451 	if (head_bit)
1452 		cpu_buffer->head_page = list_entry(next_page,
1453 						struct buffer_page, list);
1454 
1455 	/*
1456 	 * change read pointer to make sure any read iterators reset
1457 	 * themselves
1458 	 */
1459 	cpu_buffer->read = 0;
1460 
1461 	/* pages are removed, resume tracing and then free the pages */
1462 	atomic_dec(&cpu_buffer->record_disabled);
1463 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1464 
1465 	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1466 
1467 	/* last buffer page to remove */
1468 	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1469 				list);
1470 	tmp_iter_page = first_page;
1471 
1472 	do {
1473 		to_remove_page = tmp_iter_page;
1474 		rb_inc_page(cpu_buffer, &tmp_iter_page);
1475 
1476 		/* update the counters */
1477 		page_entries = rb_page_entries(to_remove_page);
1478 		if (page_entries) {
1479 			/*
1480 			 * If something was added to this page, it was full
1481 			 * since it is not the tail page. So we subtract a full
1482 			 * page's worth of bytes from the ring buffer's byte count.
1483 			 * Increment overrun to account for the lost events.
1484 			 */
1485 			local_add(page_entries, &cpu_buffer->overrun);
1486 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1487 		}
1488 
1489 		/*
1490 		 * We have already removed references to this list item, just
1491 		 * free up the buffer_page and its page
1492 		 */
1493 		free_buffer_page(to_remove_page);
1494 		nr_removed--;
1495 
1496 	} while (to_remove_page != last_page);
1497 
1498 	RB_WARN_ON(cpu_buffer, nr_removed);
1499 
1500 	return nr_removed == 0;
1501 }
1502 
1503 static int
1504 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1505 {
1506 	struct list_head *pages = &cpu_buffer->new_pages;
1507 	int retries, success;
1508 
1509 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1510 	/*
1511 	 * We are holding the reader lock, so the reader page won't be swapped
1512 	 * in the ring buffer. Now we are racing with the writer trying to
1513 	 * move head page and the tail page.
1514 	 * We are going to adapt the reader page update process where:
1515 	 * 1. We first splice the start and end of list of new pages between
1516 	 *    the head page and its previous page.
1517 	 * 2. We cmpxchg the prev_page->next to point from head page to the
1518 	 *    start of new pages list.
1519 	 * 3. Finally, we update the head->prev to the end of new list.
1520 	 *
1521 	 * We will try this process 10 times, to make sure that we don't keep
1522 	 * spinning.
1523 	 */
1524 	retries = 10;
1525 	success = 0;
1526 	while (retries--) {
1527 		struct list_head *head_page, *prev_page, *r;
1528 		struct list_head *last_page, *first_page;
1529 		struct list_head *head_page_with_bit;
1530 		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1531 
1532 		if (!hpage)	/* rb_set_head_page() can return NULL */
1533 			break;
1534 		head_page = &hpage->list;
1535 		prev_page = head_page->prev;
1536 		first_page = pages->next;
1537 		last_page  = pages->prev;
1538 
1539 		head_page_with_bit = (struct list_head *)
1540 				     ((unsigned long)head_page | RB_PAGE_HEAD);
1541 
1542 		last_page->next = head_page_with_bit;
1543 		first_page->prev = prev_page;
1544 
1545 		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1546 
1547 		if (r == head_page_with_bit) {
1548 			/*
1549 			 * yay, we replaced the page pointer to our new list,
1550 			 * now we just have to update the head page's prev
1551 			 * pointer to point to the end of the list
1552 			 */
1553 			head_page->prev = last_page;
1554 			success = 1;
1555 			break;
1556 		}
1557 	}
1558 
1559 	if (success)
1560 		INIT_LIST_HEAD(pages);
1561 	/*
1562 	 * If we weren't successful in adding the new pages, warn and stop
1563 	 * tracing
1564 	 */
1565 	RB_WARN_ON(cpu_buffer, !success);
1566 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1567 
1568 	/* free pages if they weren't inserted */
1569 	if (!success) {
1570 		struct buffer_page *bpage, *tmp;
1571 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1572 					 list) {
1573 			list_del_init(&bpage->list);
1574 			free_buffer_page(bpage);
1575 		}
1576 	}
1577 	return success;
1578 }
1579 
1580 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1581 {
1582 	int success;
1583 
1584 	if (cpu_buffer->nr_pages_to_update > 0)
1585 		success = rb_insert_pages(cpu_buffer);
1586 	else
1587 		success = rb_remove_pages(cpu_buffer,
1588 					-cpu_buffer->nr_pages_to_update);
1589 
1590 	if (success)
1591 		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1592 }
1593 
1594 static void update_pages_handler(struct work_struct *work)
1595 {
1596 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1597 			struct ring_buffer_per_cpu, update_pages_work);
1598 	rb_update_pages(cpu_buffer);
1599 	complete(&cpu_buffer->update_done);
1600 }
1601 
1602 /**
1603  * ring_buffer_resize - resize the ring buffer
1604  * @buffer: the buffer to resize.
1605  * @size: the new size.
1606  * @cpu_id: the cpu buffer to resize
1607  *
1608  * Minimum size is 2 * BUF_PAGE_SIZE.
1609  *
1610  * Returns the adjusted size on success and < 0 on failure.
1611  */
1612 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1613 			int cpu_id)
1614 {
1615 	struct ring_buffer_per_cpu *cpu_buffer;
1616 	unsigned long nr_pages;
1617 	int cpu, err = 0;
1618 
1619 	/*
1620 	 * Always succeed at resizing a non-existent buffer:
1621 	 */
1622 	if (!buffer)
1623 		return size;
1624 
1625 	/* Make sure the requested buffer exists */
1626 	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1627 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1628 		return size;
1629 
1630 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1631 
1632 	/* we need a minimum of two pages */
1633 	if (nr_pages < 2)
1634 		nr_pages = 2;
1635 
1636 	size = nr_pages * BUF_PAGE_SIZE;
1637 
1638 	/*
1639 	 * Don't succeed if resizing is disabled, as a reader might be
1640 	 * manipulating the ring buffer and is expecting a sane state while
1641 	 * this is true.
1642 	 */
1643 	if (atomic_read(&buffer->resize_disabled))
1644 		return -EBUSY;
1645 
1646 	/* prevent another thread from changing buffer sizes */
1647 	mutex_lock(&buffer->mutex);
1648 
1649 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1650 		/* calculate the pages to update */
1651 		for_each_buffer_cpu(buffer, cpu) {
1652 			cpu_buffer = buffer->buffers[cpu];
1653 
1654 			cpu_buffer->nr_pages_to_update = nr_pages -
1655 							cpu_buffer->nr_pages;
1656 			/*
1657 			 * nothing more to do when removing pages, or if no update is needed
1658 			 */
1659 			if (cpu_buffer->nr_pages_to_update <= 0)
1660 				continue;
1661 			/*
1662 			 * to add pages, make sure all new pages can be
1663 			 * allocated without receiving ENOMEM
1664 			 */
1665 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1666 			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1667 						&cpu_buffer->new_pages, cpu)) {
1668 				/* not enough memory for new pages */
1669 				err = -ENOMEM;
1670 				goto out_err;
1671 			}
1672 		}
1673 
1674 		get_online_cpus();
1675 		/*
1676 		 * Fire off all the required work handlers
1677 		 * We can't schedule on offline CPUs, but it's not necessary
1678 		 * since we can change their buffer sizes without any race.
1679 		 */
1680 		for_each_buffer_cpu(buffer, cpu) {
1681 			cpu_buffer = buffer->buffers[cpu];
1682 			if (!cpu_buffer->nr_pages_to_update)
1683 				continue;
1684 
1685 			/* Can't run something on an offline CPU. */
1686 			if (!cpu_online(cpu)) {
1687 				rb_update_pages(cpu_buffer);
1688 				cpu_buffer->nr_pages_to_update = 0;
1689 			} else {
1690 				schedule_work_on(cpu,
1691 						&cpu_buffer->update_pages_work);
1692 			}
1693 		}
1694 
1695 		/* wait for all the updates to complete */
1696 		for_each_buffer_cpu(buffer, cpu) {
1697 			cpu_buffer = buffer->buffers[cpu];
1698 			if (!cpu_buffer->nr_pages_to_update)
1699 				continue;
1700 
1701 			if (cpu_online(cpu))
1702 				wait_for_completion(&cpu_buffer->update_done);
1703 			cpu_buffer->nr_pages_to_update = 0;
1704 		}
1705 
1706 		put_online_cpus();
1707 	} else {
1708 		/* Make sure this CPU has been initialized */
1709 		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1710 			goto out;
1711 
1712 		cpu_buffer = buffer->buffers[cpu_id];
1713 
1714 		if (nr_pages == cpu_buffer->nr_pages)
1715 			goto out;
1716 
1717 		cpu_buffer->nr_pages_to_update = nr_pages -
1718 						cpu_buffer->nr_pages;
1719 
1720 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1721 		if (cpu_buffer->nr_pages_to_update > 0 &&
1722 			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1723 					    &cpu_buffer->new_pages, cpu_id)) {
1724 			err = -ENOMEM;
1725 			goto out_err;
1726 		}
1727 
1728 		get_online_cpus();
1729 
1730 		/* Can't run something on an offline CPU. */
1731 		if (!cpu_online(cpu_id))
1732 			rb_update_pages(cpu_buffer);
1733 		else {
1734 			schedule_work_on(cpu_id,
1735 					 &cpu_buffer->update_pages_work);
1736 			wait_for_completion(&cpu_buffer->update_done);
1737 		}
1738 
1739 		cpu_buffer->nr_pages_to_update = 0;
1740 		put_online_cpus();
1741 	}
1742 
1743  out:
1744 	/*
1745 	 * The ring buffer resize can happen with the ring buffer
1746 	 * enabled, so that the update disturbs the tracing as little
1747 	 * as possible. But if the buffer is disabled, we do not need
1748 	 * to worry about that, and we can take the time to verify
1749 	 * that the buffer is not corrupt.
1750 	 */
1751 	if (atomic_read(&buffer->record_disabled)) {
1752 		atomic_inc(&buffer->record_disabled);
1753 		/*
1754 		 * Even though the buffer was disabled, we must make sure
1755 		 * that it is truly disabled before calling rb_check_pages.
1756 		 * There could have been a race between checking
1757 		 * record_disable and incrementing it.
1758 		 */
1759 		synchronize_sched();
1760 		for_each_buffer_cpu(buffer, cpu) {
1761 			cpu_buffer = buffer->buffers[cpu];
1762 			rb_check_pages(cpu_buffer);
1763 		}
1764 		atomic_dec(&buffer->record_disabled);
1765 	}
1766 
1767 	mutex_unlock(&buffer->mutex);
1768 	return size;
1769 
1770  out_err:
1771 	for_each_buffer_cpu(buffer, cpu) {
1772 		struct buffer_page *bpage, *tmp;
1773 
1774 		cpu_buffer = buffer->buffers[cpu];
1775 		cpu_buffer->nr_pages_to_update = 0;
1776 
1777 		if (list_empty(&cpu_buffer->new_pages))
1778 			continue;
1779 
1780 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1781 					list) {
1782 			list_del_init(&bpage->list);
1783 			free_buffer_page(bpage);
1784 		}
1785 	}
1786 	mutex_unlock(&buffer->mutex);
1787 	return err;
1788 }
1789 EXPORT_SYMBOL_GPL(ring_buffer_resize);
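
/*
 * Illustrative sketch: growing every per-cpu buffer to 64 pages. @size
 * is in bytes and is rounded up to whole pages, and the adjusted size
 * is returned on success:
 *
 *	int ret = ring_buffer_resize(buffer, 64 * BUF_PAGE_SIZE,
 *				     RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		return ret;
 */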
1790 
1791 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1792 {
1793 	mutex_lock(&buffer->mutex);
1794 	if (val)
1795 		buffer->flags |= RB_FL_OVERWRITE;
1796 	else
1797 		buffer->flags &= ~RB_FL_OVERWRITE;
1798 	mutex_unlock(&buffer->mutex);
1799 }
1800 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1801 
1802 static __always_inline void *
1803 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1804 {
1805 	return bpage->data + index;
1806 }
1807 
1808 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1809 {
1810 	return bpage->page->data + index;
1811 }
1812 
1813 static __always_inline struct ring_buffer_event *
1814 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1815 {
1816 	return __rb_page_index(cpu_buffer->reader_page,
1817 			       cpu_buffer->reader_page->read);
1818 }
1819 
1820 static __always_inline struct ring_buffer_event *
1821 rb_iter_head_event(struct ring_buffer_iter *iter)
1822 {
1823 	return __rb_page_index(iter->head_page, iter->head);
1824 }
1825 
1826 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
1827 {
1828 	return local_read(&bpage->page->commit);
1829 }
1830 
1831 /* Size is determined by what has been committed */
1832 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
1833 {
1834 	return rb_page_commit(bpage);
1835 }
1836 
1837 static __always_inline unsigned
1838 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1839 {
1840 	return rb_page_commit(cpu_buffer->commit_page);
1841 }
1842 
1843 static __always_inline unsigned
1844 rb_event_index(struct ring_buffer_event *event)
1845 {
1846 	unsigned long addr = (unsigned long)event;
1847 
1848 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1849 }
1850 
1851 static void rb_inc_iter(struct ring_buffer_iter *iter)
1852 {
1853 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1854 
1855 	/*
1856 	 * The iterator could be on the reader page (it starts there).
1857 	 * But the head could have moved, since the reader was
1858 	 * found. Check for this case and assign the iterator
1859 	 * to the head page instead of next.
1860 	 */
1861 	if (iter->head_page == cpu_buffer->reader_page)
1862 		iter->head_page = rb_set_head_page(cpu_buffer);
1863 	else
1864 		rb_inc_page(cpu_buffer, &iter->head_page);
1865 
1866 	iter->read_stamp = iter->head_page->page->time_stamp;
1867 	iter->head = 0;
1868 }
1869 
1870 /*
1871  * rb_handle_head_page - writer hit the head page
1872  *
1873  * Returns: +1 to retry page
1874  *           0 to continue
1875  *          -1 on error
1876  */
1877 static int
1878 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1879 		    struct buffer_page *tail_page,
1880 		    struct buffer_page *next_page)
1881 {
1882 	struct buffer_page *new_head;
1883 	int entries;
1884 	int type;
1885 	int ret;
1886 
1887 	entries = rb_page_entries(next_page);
1888 
1889 	/*
1890 	 * The hard part is here. We need to move the head
1891 	 * forward, and protect against both readers on
1892 	 * other CPUs and writers coming in via interrupts.
1893 	 */
1894 	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1895 				       RB_PAGE_HEAD);
1896 
1897 	/*
1898 	 * type can be one of four:
1899 	 *  NORMAL - an interrupt already moved it for us
1900 	 *  HEAD   - we are the first to get here.
1901 	 *  UPDATE - we are the interrupt interrupting
1902 	 *           a current move.
1903 	 *  MOVED  - a reader on another CPU moved the next
1904 	 *           pointer to its reader page. Give up
1905 	 *           and try again.
1906 	 */
1907 
1908 	switch (type) {
1909 	case RB_PAGE_HEAD:
1910 		/*
1911 		 * We changed the head to UPDATE, thus
1912 		 * it is our responsibility to update
1913 		 * the counters.
1914 		 */
1915 		local_add(entries, &cpu_buffer->overrun);
1916 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1917 
1918 		/*
1919 		 * The entries will be zeroed out when we move the
1920 		 * tail page.
1921 		 */
1922 
1923 		/* still more to do */
1924 		break;
1925 
1926 	case RB_PAGE_UPDATE:
1927 		/*
1928 		 * This is an interrupt that interrupted the
1929 		 * previous update. Still more to do.
1930 		 */
1931 		break;
1932 	case RB_PAGE_NORMAL:
1933 		/*
1934 		 * An interrupt came in before the update
1935 		 * and processed this for us.
1936 		 * Nothing left to do.
1937 		 */
1938 		return 1;
1939 	case RB_PAGE_MOVED:
1940 		/*
1941 		 * The reader is on another CPU and just did
1942 		 * a swap with our next_page.
1943 		 * Try again.
1944 		 */
1945 		return 1;
1946 	default:
1947 		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1948 		return -1;
1949 	}
1950 
1951 	/*
1952 	 * Now that we are here, the old head pointer is
1953 	 * set to UPDATE. This will keep the reader from
1954 	 * swapping the head page with the reader page.
1955 	 * The reader (on another CPU) will spin till
1956 	 * we are finished.
1957 	 *
1958 	 * We just need to protect against interrupts
1959 	 * doing the job. We will set the next pointer
1960 	 * to HEAD. After that, we set the old pointer
1961 	 * to NORMAL, but only if it was HEAD before.
1962 	 * to NORMAL, but only if it was HEAD before;
1963 	 * otherwise we are an interrupt, and only
1964 	 * want the outermost commit to reset it.
1965 	new_head = next_page;
1966 	rb_inc_page(cpu_buffer, &new_head);
1967 
1968 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1969 				    RB_PAGE_NORMAL);
1970 
1971 	/*
1972 	 * Valid returns are:
1973 	 *  HEAD   - an interrupt came in and already set it.
1974 	 *  NORMAL - One of two things:
1975 	 *            1) We really set it.
1976 	 *            2) A bunch of interrupts came in and moved
1977 	 *               the page forward again.
1978 	 */
1979 	switch (ret) {
1980 	case RB_PAGE_HEAD:
1981 	case RB_PAGE_NORMAL:
1982 		/* OK */
1983 		break;
1984 	default:
1985 		RB_WARN_ON(cpu_buffer, 1);
1986 		return -1;
1987 	}
1988 
1989 	/*
1990 	 * It is possible that an interrupt came in,
1991 	 * set the head up, then more interrupts came in
1992 	 * and moved it again. When we get back here,
1993 	 * the page would have been set to NORMAL but we
1994 	 * just set it back to HEAD.
1995 	 *
1996 	 * How do you detect this? Well, if that happened
1997 	 * the tail page would have moved.
1998 	 */
1999 	if (ret == RB_PAGE_NORMAL) {
2000 		struct buffer_page *buffer_tail_page;
2001 
2002 		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2003 		/*
2004 		 * If the tail had moved past next, then we need
2005 		 * to reset the pointer.
2006 		 */
2007 		if (buffer_tail_page != tail_page &&
2008 		    buffer_tail_page != next_page)
2009 			rb_head_page_set_normal(cpu_buffer, new_head,
2010 						next_page,
2011 						RB_PAGE_HEAD);
2012 	}
2013 
2014 	/*
2015 	 * If this was the outermost commit (the one that
2016 	 * changed the original pointer from HEAD to UPDATE),
2017 	 * then it is up to us to reset it to NORMAL.
2018 	 */
2019 	if (type == RB_PAGE_HEAD) {
2020 		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2021 					      tail_page,
2022 					      RB_PAGE_UPDATE);
2023 		if (RB_WARN_ON(cpu_buffer,
2024 			       ret != RB_PAGE_UPDATE))
2025 			return -1;
2026 	}
2027 
2028 	return 0;
2029 }
2030 
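/*
 * Illustrative sketch (hypothetical helper, not part of this file): how a
 * caller is expected to act on the return values documented above.
 * rb_move_tail() below does effectively this with gotos.
 */
static inline int example_handle_head(struct ring_buffer_per_cpu *cpu_buffer,
				      struct buffer_page *tail_page,
				      struct buffer_page *next_page)
{
	int ret = rb_handle_head_page(cpu_buffer, tail_page, next_page);

	if (ret < 0)
		return ret;		/* -1: corrupted state, give up */
	if (ret > 0)
		return -EAGAIN;		/* +1: moved under us, retry */
	return 0;			/* 0: we own the move, carry on */
}
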
2031 static inline void
2032 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2033 	      unsigned long tail, struct rb_event_info *info)
2034 {
2035 	struct buffer_page *tail_page = info->tail_page;
2036 	struct ring_buffer_event *event;
2037 	unsigned long length = info->length;
2038 
2039 	/*
2040 	 * Only the event that crossed the page boundary
2041 	 * must fill the old tail_page with padding.
2042 	 */
2043 	if (tail >= BUF_PAGE_SIZE) {
2044 		/*
2045 		 * If the page was filled, then we still need
2046 		 * to update the real_end. Reset it to zero
2047 		 * and the reader will ignore it.
2048 		 */
2049 		if (tail == BUF_PAGE_SIZE)
2050 			tail_page->real_end = 0;
2051 
2052 		local_sub(length, &tail_page->write);
2053 		return;
2054 	}
2055 
2056 	event = __rb_page_index(tail_page, tail);
2057 	kmemcheck_annotate_bitfield(event, bitfield);
2058 
2059 	/* account for padding bytes */
2060 	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2061 
2062 	/*
2063 	 * Save the original length to the meta data.
2064 	 * This will be used by the reader to add the lost event
2065 	 * counter.
2066 	 */
2067 	tail_page->real_end = tail;
2068 
2069 	/*
2070 	 * If this event is bigger than the minimum size, then
2071 	 * we need to be careful that we don't subtract the
2072 	 * write counter enough to allow another writer to slip
2073 	 * in on this page.
2074 	 * We put in a discarded commit instead, to make sure
2075 	 * that this space is not used again.
2076 	 *
2077 	 * If we are less than the minimum size, we don't need to
2078 	 * worry about it.
2079 	 */
2080 	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2081 		/* No room for any events */
2082 
2083 		/* Mark the rest of the page with padding */
2084 		rb_event_set_padding(event);
2085 
2086 		/* Set the write back to the previous setting */
2087 		local_sub(length, &tail_page->write);
2088 		return;
2089 	}
2090 
2091 	/* Put in a discarded event */
2092 	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2093 	event->type_len = RINGBUF_TYPE_PADDING;
2094 	/* time delta must be non zero */
2095 	event->time_delta = 1;
2096 
2097 	/* Set write to end of buffer */
2098 	length = (tail + length) - BUF_PAGE_SIZE;
2099 	local_sub(length, &tail_page->write);
2100 }
2101 
2102 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2103 
2104 /*
2105  * This is the slow path; force gcc not to inline it.
2106  */
2107 static noinline struct ring_buffer_event *
2108 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2109 	     unsigned long tail, struct rb_event_info *info)
2110 {
2111 	struct buffer_page *tail_page = info->tail_page;
2112 	struct buffer_page *commit_page = cpu_buffer->commit_page;
2113 	struct ring_buffer *buffer = cpu_buffer->buffer;
2114 	struct buffer_page *next_page;
2115 	int ret;
2116 
2117 	next_page = tail_page;
2118 
2119 	rb_inc_page(cpu_buffer, &next_page);
2120 
2121 	/*
2122 	 * If, for some reason, we had an interrupt storm that made
2123 	 * it all the way around the buffer, bail, and warn
2124 	 * about it.
2125 	 */
2126 	if (unlikely(next_page == commit_page)) {
2127 		local_inc(&cpu_buffer->commit_overrun);
2128 		goto out_reset;
2129 	}
2130 
2131 	/*
2132 	 * This is where the fun begins!
2133 	 *
2134 	 * We are fighting against races between a reader that
2135 	 * could be on another CPU trying to swap its reader
2136 	 * page with the buffer head.
2137 	 *
2138 	 * We are also fighting against interrupts coming in and
2139 	 * moving the head or tail on us as well.
2140 	 *
2141 	 * If the next page is the head page then we have filled
2142 	 * the buffer, unless the commit page is still on the
2143 	 * reader page.
2144 	 */
2145 	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2146 
2147 		/*
2148 		 * If the commit is not on the reader page, then
2149 		 * move the header page.
2150 		 */
2151 		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2152 			/*
2153 			 * If we are not in overwrite mode,
2154 			 * this is easy, just stop here.
2155 			 */
2156 			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2157 				local_inc(&cpu_buffer->dropped_events);
2158 				goto out_reset;
2159 			}
2160 
2161 			ret = rb_handle_head_page(cpu_buffer,
2162 						  tail_page,
2163 						  next_page);
2164 			if (ret < 0)
2165 				goto out_reset;
2166 			if (ret)
2167 				goto out_again;
2168 		} else {
2169 			/*
2170 			 * We need to be careful here too. The
2171 			 * commit page could still be on the reader
2172 			 * page. We could have a small buffer, and
2173 			 * have filled up the buffer with events
2174 			 * from interrupts and such, and wrapped.
2175 			 *
2176 			 * Note, if the tail page is also on the
2177 			 * reader_page, we let it move out.
2178 			 */
2179 			if (unlikely((cpu_buffer->commit_page !=
2180 				      cpu_buffer->tail_page) &&
2181 				     (cpu_buffer->commit_page ==
2182 				      cpu_buffer->reader_page))) {
2183 				local_inc(&cpu_buffer->commit_overrun);
2184 				goto out_reset;
2185 			}
2186 		}
2187 	}
2188 
2189 	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2190 
2191  out_again:
2192 
2193 	rb_reset_tail(cpu_buffer, tail, info);
2194 
2195 	/* Commit what we have for now. */
2196 	rb_end_commit(cpu_buffer);
2197 	/* rb_end_commit() decs committing */
2198 	local_inc(&cpu_buffer->committing);
2199 
2200 	/* fail and let the caller try again */
2201 	return ERR_PTR(-EAGAIN);
2202 
2203  out_reset:
2204 	/* reset write */
2205 	rb_reset_tail(cpu_buffer, tail, info);
2206 
2207 	return NULL;
2208 }
2209 
2210 /* Slow path, do not inline */
2211 static noinline struct ring_buffer_event *
2212 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
2213 {
2214 	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2215 
2216 	/* Not the first event on the page? */
2217 	if (rb_event_index(event)) {
2218 		event->time_delta = delta & TS_MASK;
2219 		event->array[0] = delta >> TS_SHIFT;
2220 	} else {
2221 		/* nope, just zero it */
2222 		event->time_delta = 0;
2223 		event->array[0] = 0;
2224 	}
2225 
2226 	return skip_time_extend(event);
2227 }
2228 
2229 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2230 				     struct ring_buffer_event *event);
2231 
2232 /**
2233  * rb_update_event - update event type and data
2234  * @event: the event to update
2235  * @type: the type of event
2236  * @length: the size of the event field in the ring buffer
2237  *
2238  * Update the type and data fields of the event. The length
2239  * is the actual size that is written to the ring buffer,
2240  * and with this, we can determine what to place into the
2241  * data field.
2242  */
2243 static void
2244 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2245 		struct ring_buffer_event *event,
2246 		struct rb_event_info *info)
2247 {
2248 	unsigned length = info->length;
2249 	u64 delta = info->delta;
2250 
2251 	/* Only a commit updates the timestamp */
2252 	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2253 		delta = 0;
2254 
2255 	/*
2256 	 * If we need to add a timestamp, then we
2257 	 * add it to the start of the reserved space.
2258 	 */
2259 	if (unlikely(info->add_timestamp)) {
2260 		event = rb_add_time_stamp(event, delta);
2261 		length -= RB_LEN_TIME_EXTEND;
2262 		delta = 0;
2263 	}
2264 
2265 	event->time_delta = delta;
2266 	length -= RB_EVNT_HDR_SIZE;
2267 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2268 		event->type_len = 0;
2269 		event->array[0] = length;
2270 	} else
2271 		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2272 }
2273 
2274 static unsigned rb_calculate_event_length(unsigned length)
2275 {
2276 	struct ring_buffer_event event; /* Used only for sizeof array */
2277 
2278 	/* zero length can cause confusion */
2279 	if (!length)
2280 		length++;
2281 
2282 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2283 		length += sizeof(event.array[0]);
2284 
2285 	length += RB_EVNT_HDR_SIZE;
2286 	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2287 
2288 	/*
2289 	 * In case the time delta is larger than the 27 bits for it
2290 	 * in the header, we need to add a timestamp. If another
2291 	 * event comes in when trying to discard this one to increase
2292 	 * the length, then the timestamp will be added in the allocated
2293 	 * space of this event. If length is bigger than the size needed
2294 	 * for the TIME_EXTEND, then padding has to be used. The event's
2295 	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2296 	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2297 	 * As length is a multiple of 4, we only need to worry if it
2298 	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2299 	 */
2300 	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2301 		length += RB_ALIGNMENT;
2302 
2303 	return length;
2304 }
2305 
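/*
 * Standalone sketch of the sizing rule above (illustrative only; the
 * constants mirror this file under the assumption of a 4-byte event
 * header, 4-byte alignment, no forced 8-byte alignment, and a small
 * payload): a request that lands on exactly 12 bytes is padded to 16
 * so that a discarded event can still hold a TIME_EXTEND plus padding.
 */
static inline unsigned int example_event_length(unsigned int payload)
{
	unsigned int len = payload ? payload : 1;	/* no zero length */

	len += 4;			/* event header */
	len = (len + 3) & ~3u;		/* align to 4 bytes */
	if (len == 12)			/* RB_LEN_TIME_EXTEND + 4 */
		len += 4;
	return len;
}
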
2306 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2307 static inline bool sched_clock_stable(void)
2308 {
2309 	return true;
2310 }
2311 #endif
2312 
2313 static inline int
2314 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2315 		  struct ring_buffer_event *event)
2316 {
2317 	unsigned long new_index, old_index;
2318 	struct buffer_page *bpage;
2319 	unsigned long index;
2320 	unsigned long addr;
2321 
2322 	new_index = rb_event_index(event);
2323 	old_index = new_index + rb_event_ts_length(event);
2324 	addr = (unsigned long)event;
2325 	addr &= PAGE_MASK;
2326 
2327 	bpage = READ_ONCE(cpu_buffer->tail_page);
2328 
2329 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2330 		unsigned long write_mask =
2331 			local_read(&bpage->write) & ~RB_WRITE_MASK;
2332 		unsigned long event_length = rb_event_length(event);
2333 		/*
2334 		 * This is on the tail page. It is possible that
2335 		 * a write could come in and move the tail page
2336 		 * and write to the next page. That is fine
2337 		 * because we just shorten what is on this page.
2338 		 */
2339 		old_index += write_mask;
2340 		new_index += write_mask;
2341 		index = local_cmpxchg(&bpage->write, old_index, new_index);
2342 		if (index == old_index) {
2343 			/* update counters */
2344 			local_sub(event_length, &cpu_buffer->entries_bytes);
2345 			return 1;
2346 		}
2347 	}
2348 
2349 	/* could not discard */
2350 	return 0;
2351 }
2352 
2353 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2354 {
2355 	local_inc(&cpu_buffer->committing);
2356 	local_inc(&cpu_buffer->commits);
2357 }
2358 
2359 static __always_inline void
2360 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2361 {
2362 	unsigned long max_count;
2363 
2364 	/*
2365 	 * We only race with interrupts and NMIs on this CPU.
2366 	 * If we own the commit event, then we can commit
2367 	 * all others that interrupted us, since the interruptions
2368 	 * are in stack format (they finish before they come
2369 	 * back to us). This allows us to do a simple loop to
2370 	 * assign the commit to the tail.
2371 	 */
2372  again:
2373 	max_count = cpu_buffer->nr_pages * 100;
2374 
2375 	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2376 		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2377 			return;
2378 		if (RB_WARN_ON(cpu_buffer,
2379 			       rb_is_reader_page(cpu_buffer->tail_page)))
2380 			return;
2381 		local_set(&cpu_buffer->commit_page->page->commit,
2382 			  rb_page_write(cpu_buffer->commit_page));
2383 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2384 		/* Only update the write stamp if the page has an event */
2385 		if (rb_page_write(cpu_buffer->commit_page))
2386 			cpu_buffer->write_stamp =
2387 				cpu_buffer->commit_page->page->time_stamp;
2388 		/* add barrier to keep gcc from optimizing too much */
2389 		barrier();
2390 	}
2391 	while (rb_commit_index(cpu_buffer) !=
2392 	       rb_page_write(cpu_buffer->commit_page)) {
2393 
2394 		local_set(&cpu_buffer->commit_page->page->commit,
2395 			  rb_page_write(cpu_buffer->commit_page));
2396 		RB_WARN_ON(cpu_buffer,
2397 			   local_read(&cpu_buffer->commit_page->page->commit) &
2398 			   ~RB_WRITE_MASK);
2399 		barrier();
2400 	}
2401 
2402 	/* again, keep gcc from optimizing */
2403 	barrier();
2404 
2405 	/*
2406 	 * If an interrupt came in just after the first while loop
2407 	 * and pushed the tail page forward, we will be left with
2408 	 * a dangling commit that will never go forward.
2409 	 */
2410 	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2411 		goto again;
2412 }
2413 
2414 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2415 {
2416 	unsigned long commits;
2417 
2418 	if (RB_WARN_ON(cpu_buffer,
2419 		       !local_read(&cpu_buffer->committing)))
2420 		return;
2421 
2422  again:
2423 	commits = local_read(&cpu_buffer->commits);
2424 	/* synchronize with interrupts */
2425 	barrier();
2426 	if (local_read(&cpu_buffer->committing) == 1)
2427 		rb_set_commit_to_write(cpu_buffer);
2428 
2429 	local_dec(&cpu_buffer->committing);
2430 
2431 	/* synchronize with interrupts */
2432 	barrier();
2433 
2434 	/*
2435 	 * Need to account for interrupts coming in between the
2436 	 * updating of the commit page and the clearing of the
2437 	 * committing counter.
2438 	 */
2439 	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2440 	    !local_read(&cpu_buffer->committing)) {
2441 		local_inc(&cpu_buffer->committing);
2442 		goto again;
2443 	}
2444 }
2445 
2446 static inline void rb_event_discard(struct ring_buffer_event *event)
2447 {
2448 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2449 		event = skip_time_extend(event);
2450 
2451 	/* array[0] holds the actual length for the discarded event */
2452 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2453 	event->type_len = RINGBUF_TYPE_PADDING;
2454 	/* time delta must be non zero */
2455 	if (!event->time_delta)
2456 		event->time_delta = 1;
2457 }
2458 
2459 static __always_inline bool
2460 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2461 		   struct ring_buffer_event *event)
2462 {
2463 	unsigned long addr = (unsigned long)event;
2464 	unsigned long index;
2465 
2466 	index = rb_event_index(event);
2467 	addr &= PAGE_MASK;
2468 
2469 	return cpu_buffer->commit_page->page == (void *)addr &&
2470 		rb_commit_index(cpu_buffer) == index;
2471 }
2472 
2473 static __always_inline void
2474 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2475 		      struct ring_buffer_event *event)
2476 {
2477 	u64 delta;
2478 
2479 	/*
2480 	 * The first event in the commit queue updates the
2481 	 * time stamp.
2482 	 */
2483 	if (rb_event_is_commit(cpu_buffer, event)) {
2484 		/*
2485 		 * A commit event that is first on a page
2486 		 * updates the write timestamp with the page stamp.
2487 		 */
2488 		if (!rb_event_index(event))
2489 			cpu_buffer->write_stamp =
2490 				cpu_buffer->commit_page->page->time_stamp;
2491 		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2492 			delta = event->array[0];
2493 			delta <<= TS_SHIFT;
2494 			delta += event->time_delta;
2495 			cpu_buffer->write_stamp += delta;
2496 		} else
2497 			cpu_buffer->write_stamp += event->time_delta;
2498 	}
2499 }
2500 
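/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * TIME_EXTEND decoding used above. A large delta is split across the
 * 27-bit time_delta field and array[0], and reassembled like this.
 */
static inline u64 example_extended_delta(struct ring_buffer_event *event)
{
	u64 delta = event->array[0];	/* upper bits of the delta */

	delta <<= TS_SHIFT;
	delta += event->time_delta;	/* low 27 bits from the header */
	return delta;
}
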
2501 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2502 		      struct ring_buffer_event *event)
2503 {
2504 	local_inc(&cpu_buffer->entries);
2505 	rb_update_write_stamp(cpu_buffer, event);
2506 	rb_end_commit(cpu_buffer);
2507 }
2508 
2509 static __always_inline void
2510 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2511 {
2512 	bool pagebusy;
2513 
2514 	if (buffer->irq_work.waiters_pending) {
2515 		buffer->irq_work.waiters_pending = false;
2516 		/* irq_work_queue() supplies its own memory barriers */
2517 		irq_work_queue(&buffer->irq_work.work);
2518 	}
2519 
2520 	if (cpu_buffer->irq_work.waiters_pending) {
2521 		cpu_buffer->irq_work.waiters_pending = false;
2522 		/* irq_work_queue() supplies its own memory barriers */
2523 		irq_work_queue(&cpu_buffer->irq_work.work);
2524 	}
2525 
2526 	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
2527 
2528 	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2529 		cpu_buffer->irq_work.wakeup_full = true;
2530 		cpu_buffer->irq_work.full_waiters_pending = false;
2531 		/* irq_work_queue() supplies its own memory barriers */
2532 		irq_work_queue(&cpu_buffer->irq_work.work);
2533 	}
2534 }
2535 
2536 /*
2537  * The lock and unlock are done within a preempt disable section.
2538  * The current_context per_cpu variable can only be modified
2539  * by the current task between lock and unlock. But it can
2540  * be modified more than once via an interrupt. To pass this
2541  * information from the lock to the unlock without having to
2542  * access the 'in_interrupt()' functions again (which do show
2543  * a bit of overhead in something as critical as function tracing),
2544  * we use a bitmask trick.
2545  *
2546  *  bit 0 =  NMI context
2547  *  bit 1 =  IRQ context
2548  *  bit 2 =  SoftIRQ context
2549  *  bit 3 =  normal context.
2550  *
2551  * This works because this is the order of contexts that can
2552  * preempt other contexts. A SoftIRQ never preempts an IRQ
2553  * context.
2554  *
2555  * When the context is determined, the corresponding bit is
2556  * checked and set (if it was set, then a recursion of that context
2557  * happened).
2558  *
2559  * On unlock, we need to clear this bit. To do so, just subtract
2560  * 1 from the current_context and AND it to itself.
2561  *
2562  * (binary)
2563  *  101 - 1 = 100
2564  *  101 & 100 = 100 (clearing bit zero)
2565  *
2566  *  1010 - 1 = 1001
2567  *  1010 & 1001 = 1000 (clearing bit 1)
2568  *
2569  * The least significant bit can be cleared this way, and it
2570  * just so happens that it is the same bit corresponding to
2571  * the current context.
2572  */
2573 
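/*
 * Minimal sketch of the bit trick described above (illustrative only,
 * not part of this file): "val & (val - 1)" clears the least
 * significant set bit, which is exactly the bit of the context we are
 * currently unlocking.
 */
static inline unsigned int example_clear_current_context(unsigned int val)
{
	/* 0b0101 -> 0b0100, 0b1010 -> 0b1000 */
	return val & (val - 1);
}
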
2574 static __always_inline int
2575 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2576 {
2577 	unsigned int val = cpu_buffer->current_context;
2578 	int bit;
2579 
2580 	if (in_interrupt()) {
2581 		if (in_nmi())
2582 			bit = RB_CTX_NMI;
2583 		else if (in_irq())
2584 			bit = RB_CTX_IRQ;
2585 		else
2586 			bit = RB_CTX_SOFTIRQ;
2587 	} else
2588 		bit = RB_CTX_NORMAL;
2589 
2590 	if (unlikely(val & (1 << bit)))
2591 		return 1;
2592 
2593 	val |= (1 << bit);
2594 	cpu_buffer->current_context = val;
2595 
2596 	return 0;
2597 }
2598 
2599 static __always_inline void
2600 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2601 {
2602 	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2603 }
2604 
2605 /**
2606  * ring_buffer_unlock_commit - commit a reserved event
2607  * @buffer: The buffer to commit to
2608  * @event: The event pointer to commit.
2609  *
2610  * This commits the data to the ring buffer, and releases any locks held.
2611  *
2612  * Must be paired with ring_buffer_lock_reserve.
2613  */
2614 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2615 			      struct ring_buffer_event *event)
2616 {
2617 	struct ring_buffer_per_cpu *cpu_buffer;
2618 	int cpu = raw_smp_processor_id();
2619 
2620 	cpu_buffer = buffer->buffers[cpu];
2621 
2622 	rb_commit(cpu_buffer, event);
2623 
2624 	rb_wakeups(buffer, cpu_buffer);
2625 
2626 	trace_recursive_unlock(cpu_buffer);
2627 
2628 	preempt_enable_notrace();
2629 
2630 	return 0;
2631 }
2632 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2633 
2634 static noinline void
2635 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2636 		    struct rb_event_info *info)
2637 {
2638 	WARN_ONCE(info->delta > (1ULL << 59),
2639 		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2640 		  (unsigned long long)info->delta,
2641 		  (unsigned long long)info->ts,
2642 		  (unsigned long long)cpu_buffer->write_stamp,
2643 		  sched_clock_stable() ? "" :
2644 		  "If you just came from a suspend/resume,\n"
2645 		  "please switch to the trace global clock:\n"
2646 		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2647 	info->add_timestamp = 1;
2648 }
2649 
2650 static struct ring_buffer_event *
2651 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2652 		  struct rb_event_info *info)
2653 {
2654 	struct ring_buffer_event *event;
2655 	struct buffer_page *tail_page;
2656 	unsigned long tail, write;
2657 
2658 	/*
2659 	 * If the time delta since the last event is too big to
2660 	 * hold in the time field of the event, then we append a
2661 	 * TIME EXTEND event ahead of the data event.
2662 	 */
2663 	if (unlikely(info->add_timestamp))
2664 		info->length += RB_LEN_TIME_EXTEND;
2665 
2666 	/* Don't let the compiler play games with cpu_buffer->tail_page */
2667 	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2668 	write = local_add_return(info->length, &tail_page->write);
2669 
2670 	/* set write to only the index of the write */
2671 	write &= RB_WRITE_MASK;
2672 	tail = write - info->length;
2673 
2674 	/*
2675 	 * If this is the first commit on the page, then it has the same
2676 	 * timestamp as the page itself.
2677 	 */
2678 	if (!tail)
2679 		info->delta = 0;
2680 
2681 	/* See if we shot past the end of this buffer page */
2682 	if (unlikely(write > BUF_PAGE_SIZE))
2683 		return rb_move_tail(cpu_buffer, tail, info);
2684 
2685 	/* We reserved something on the buffer */
2686 
2687 	event = __rb_page_index(tail_page, tail);
2688 	kmemcheck_annotate_bitfield(event, bitfield);
2689 	rb_update_event(cpu_buffer, event, info);
2690 
2691 	local_inc(&tail_page->entries);
2692 
2693 	/*
2694 	 * If this is the first commit on the page, then update
2695 	 * its timestamp.
2696 	 */
2697 	if (!tail)
2698 		tail_page->page->time_stamp = info->ts;
2699 
2700 	/* account for these added bytes */
2701 	local_add(info->length, &cpu_buffer->entries_bytes);
2702 
2703 	return event;
2704 }
2705 
2706 static __always_inline struct ring_buffer_event *
2707 rb_reserve_next_event(struct ring_buffer *buffer,
2708 		      struct ring_buffer_per_cpu *cpu_buffer,
2709 		      unsigned long length)
2710 {
2711 	struct ring_buffer_event *event;
2712 	struct rb_event_info info;
2713 	int nr_loops = 0;
2714 	u64 diff;
2715 
2716 	rb_start_commit(cpu_buffer);
2717 
2718 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2719 	/*
2720 	 * Due to the ability to swap a cpu buffer from a buffer
2721 	 * it is possible it was swapped before we committed.
2722 	 * (committing stops a swap). We check for it here and
2723 	 * if it happened, we have to fail the write.
2724 	 */
2725 	barrier();
2726 	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2727 		local_dec(&cpu_buffer->committing);
2728 		local_dec(&cpu_buffer->commits);
2729 		return NULL;
2730 	}
2731 #endif
2732 
2733 	info.length = rb_calculate_event_length(length);
2734  again:
2735 	info.add_timestamp = 0;
2736 	info.delta = 0;
2737 
2738 	/*
2739 	 * We allow for interrupts to reenter here and do a trace.
2740 	 * If one does, it will cause this original code to loop
2741 	 * back here. Even with heavy interrupts happening, this
2742 	 * should only happen a few times in a row. If this happens
2743 	 * 1000 times in a row, there must be either an interrupt
2744 	 * storm or we have something buggy.
2745 	 * Bail!
2746 	 */
2747 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2748 		goto out_fail;
2749 
2750 	info.ts = rb_time_stamp(cpu_buffer->buffer);
2751 	diff = info.ts - cpu_buffer->write_stamp;
2752 
2753 	/* make sure this diff is calculated here */
2754 	barrier();
2755 
2756 	/* Did the write stamp get updated already? */
2757 	if (likely(info.ts >= cpu_buffer->write_stamp)) {
2758 		info.delta = diff;
2759 		if (unlikely(test_time_stamp(info.delta)))
2760 			rb_handle_timestamp(cpu_buffer, &info);
2761 	}
2762 
2763 	event = __rb_reserve_next(cpu_buffer, &info);
2764 
2765 	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2766 		if (info.add_timestamp)
2767 			info.length -= RB_LEN_TIME_EXTEND;
2768 		goto again;
2769 	}
2770 
2771 	if (!event)
2772 		goto out_fail;
2773 
2774 	return event;
2775 
2776  out_fail:
2777 	rb_end_commit(cpu_buffer);
2778 	return NULL;
2779 }
2780 
2781 /**
2782  * ring_buffer_lock_reserve - reserve a part of the buffer
2783  * @buffer: the ring buffer to reserve from
2784  * @length: the length of the data to reserve (excluding event header)
2785  *
2786  * Returns a reserved event on the ring buffer to copy data directly into.
2787  * The user of this interface will need to get the body to write into
2788  * and can use the ring_buffer_event_data() interface.
2789  *
2790  * The length is the length of the data needed, not the event length
2791  * which also includes the event header.
2792  *
2793  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2794  * If NULL is returned, then nothing has been allocated or locked.
2795  */
2796 struct ring_buffer_event *
2797 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2798 {
2799 	struct ring_buffer_per_cpu *cpu_buffer;
2800 	struct ring_buffer_event *event;
2801 	int cpu;
2802 
2803 	/* If we are tracing schedule, we don't want to recurse */
2804 	preempt_disable_notrace();
2805 
2806 	if (unlikely(atomic_read(&buffer->record_disabled)))
2807 		goto out;
2808 
2809 	cpu = raw_smp_processor_id();
2810 
2811 	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2812 		goto out;
2813 
2814 	cpu_buffer = buffer->buffers[cpu];
2815 
2816 	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2817 		goto out;
2818 
2819 	if (unlikely(length > BUF_MAX_DATA_SIZE))
2820 		goto out;
2821 
2822 	if (unlikely(trace_recursive_lock(cpu_buffer)))
2823 		goto out;
2824 
2825 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2826 	if (!event)
2827 		goto out_unlock;
2828 
2829 	return event;
2830 
2831  out_unlock:
2832 	trace_recursive_unlock(cpu_buffer);
2833  out:
2834 	preempt_enable_notrace();
2835 	return NULL;
2836 }
2837 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
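
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): reserve space, write the payload through
 * ring_buffer_event_data(), then commit.
 */
static inline void example_write_event(struct ring_buffer *buffer,
				       const void *payload, unsigned long len)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return;		/* nothing reserved, nothing to commit */

	memcpy(ring_buffer_event_data(event), payload, len);
	ring_buffer_unlock_commit(buffer, event);
}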
2838 
2839 /*
2840  * Decrement the entries to the page that an event is on.
2841  * The event does not even need to exist, only the pointer
2842  * to the page it is on. This may only be called before the commit
2843  * takes place.
2844  */
2845 static inline void
2846 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2847 		   struct ring_buffer_event *event)
2848 {
2849 	unsigned long addr = (unsigned long)event;
2850 	struct buffer_page *bpage = cpu_buffer->commit_page;
2851 	struct buffer_page *start;
2852 
2853 	addr &= PAGE_MASK;
2854 
2855 	/* Do the likely case first */
2856 	if (likely(bpage->page == (void *)addr)) {
2857 		local_dec(&bpage->entries);
2858 		return;
2859 	}
2860 
2861 	/*
2862 	 * Because the commit page may be on the reader page we
2863 	 * start with the next page and check for the end of the loop there.
2864 	 */
2865 	rb_inc_page(cpu_buffer, &bpage);
2866 	start = bpage;
2867 	do {
2868 		if (bpage->page == (void *)addr) {
2869 			local_dec(&bpage->entries);
2870 			return;
2871 		}
2872 		rb_inc_page(cpu_buffer, &bpage);
2873 	} while (bpage != start);
2874 
2875 	/* commit not part of this buffer?? */
2876 	RB_WARN_ON(cpu_buffer, 1);
2877 }
2878 
2879 /**
2880  * ring_buffer_commit_discard - discard an event that has not been committed
2881  * @buffer: the ring buffer
2882  * @event: non committed event to discard
2883  *
2884  * Sometimes an event that is in the ring buffer needs to be ignored.
2885  * This function lets the user discard an event in the ring buffer
2886  * and then that event will not be read later.
2887  *
2888  * This function only works if it is called before the item has been
2889  * committed. It will try to free the event from the ring buffer
2890  * if another event has not been added behind it.
2891  *
2892  * If another event has been added behind it, it will set the event
2893  * up as discarded, and perform the commit.
2894  *
2895  * If this function is called, do not call ring_buffer_unlock_commit on
2896  * the event.
2897  */
2898 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2899 				struct ring_buffer_event *event)
2900 {
2901 	struct ring_buffer_per_cpu *cpu_buffer;
2902 	int cpu;
2903 
2904 	/* The event is discarded regardless */
2905 	rb_event_discard(event);
2906 
2907 	cpu = smp_processor_id();
2908 	cpu_buffer = buffer->buffers[cpu];
2909 
2910 	/*
2911 	 * This must only be called if the event has not been
2912 	 * committed yet. Thus we can assume that preemption
2913 	 * is still disabled.
2914 	 */
2915 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2916 
2917 	rb_decrement_entry(cpu_buffer, event);
2918 	if (rb_try_to_discard(cpu_buffer, event))
2919 		goto out;
2920 
2921 	/*
2922 	 * The commit is still visible by the reader, so we
2923 	 * must still update the timestamp.
2924 	 */
2925 	rb_update_write_stamp(cpu_buffer, event);
2926  out:
2927 	rb_end_commit(cpu_buffer);
2928 
2929 	trace_recursive_unlock(cpu_buffer);
2930 
2931 	preempt_enable_notrace();
2933 }
2934 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
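
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * reserving an event and then throwing it away with
 * ring_buffer_discard_commit() instead of committing it.
 */
static inline void example_reserve_then_drop(struct ring_buffer *buffer)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, 8);
	if (!event)
		return;

	/* changed our mind: do NOT call ring_buffer_unlock_commit() */
	ring_buffer_discard_commit(buffer, event);
}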
2935 
2936 /**
2937  * ring_buffer_write - write data to the buffer without reserving
2938  * @buffer: The ring buffer to write to.
2939  * @length: The length of the data being written (excluding the event header)
2940  * @data: The data to write to the buffer.
2941  *
2942  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2943  * one function. If you already have the data to write to the buffer, it
2944  * may be easier to simply call this function.
2945  *
2946  * Note, like ring_buffer_lock_reserve, the length is the length of the data
2947  * and not the length of the event which would hold the header.
2948  */
2949 int ring_buffer_write(struct ring_buffer *buffer,
2950 		      unsigned long length,
2951 		      void *data)
2952 {
2953 	struct ring_buffer_per_cpu *cpu_buffer;
2954 	struct ring_buffer_event *event;
2955 	void *body;
2956 	int ret = -EBUSY;
2957 	int cpu;
2958 
2959 	preempt_disable_notrace();
2960 
2961 	if (atomic_read(&buffer->record_disabled))
2962 		goto out;
2963 
2964 	cpu = raw_smp_processor_id();
2965 
2966 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2967 		goto out;
2968 
2969 	cpu_buffer = buffer->buffers[cpu];
2970 
2971 	if (atomic_read(&cpu_buffer->record_disabled))
2972 		goto out;
2973 
2974 	if (length > BUF_MAX_DATA_SIZE)
2975 		goto out;
2976 
2977 	if (unlikely(trace_recursive_lock(cpu_buffer)))
2978 		goto out;
2979 
2980 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2981 	if (!event)
2982 		goto out_unlock;
2983 
2984 	body = rb_event_data(event);
2985 
2986 	memcpy(body, data, length);
2987 
2988 	rb_commit(cpu_buffer, event);
2989 
2990 	rb_wakeups(buffer, cpu_buffer);
2991 
2992 	ret = 0;
2993 
2994  out_unlock:
2995 	trace_recursive_unlock(cpu_buffer);
2996 
2997  out:
2998 	preempt_enable_notrace();
2999 
3000 	return ret;
3001 }
3002 EXPORT_SYMBOL_GPL(ring_buffer_write);
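
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * ring_buffer_write() is the one-shot form of reserve + commit when
 * the data already exists. It returns 0 on success, -EBUSY otherwise.
 */
static inline int example_log_value(struct ring_buffer *buffer, u64 val)
{
	return ring_buffer_write(buffer, sizeof(val), &val);
}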
3003 
3004 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3005 {
3006 	struct buffer_page *reader = cpu_buffer->reader_page;
3007 	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3008 	struct buffer_page *commit = cpu_buffer->commit_page;
3009 
3010 	/* In case of error, head will be NULL */
3011 	if (unlikely(!head))
3012 		return true;
3013 
3014 	return reader->read == rb_page_commit(reader) &&
3015 		(commit == reader ||
3016 		 (commit == head &&
3017 		  head->read == rb_page_commit(commit)));
3018 }
3019 
3020 /**
3021  * ring_buffer_record_disable - stop all writes into the buffer
3022  * @buffer: The ring buffer to stop writes to.
3023  *
3024  * This prevents all writes to the buffer. Any attempt to write
3025  * to the buffer after this will fail and return NULL.
3026  *
3027  * The caller should call synchronize_sched() after this.
3028  */
3029 void ring_buffer_record_disable(struct ring_buffer *buffer)
3030 {
3031 	atomic_inc(&buffer->record_disabled);
3032 }
3033 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
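
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * disable + synchronize_sched() pairing the comment above asks for,
 * ensuring all in-flight writers have left before inspecting the buffer.
 */
static inline void example_quiesce(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* wait out current writers */
	/* ... the buffer may now be examined safely ... */
	ring_buffer_record_enable(buffer);
}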
3034 
3035 /**
3036  * ring_buffer_record_enable - enable writes to the buffer
3037  * @buffer: The ring buffer to enable writes
3038  *
3039  * Note, multiple disables will need the same number of enables
3040  * to truly enable the writing (much like preempt_disable).
3041  */
3042 void ring_buffer_record_enable(struct ring_buffer *buffer)
3043 {
3044 	atomic_dec(&buffer->record_disabled);
3045 }
3046 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3047 
3048 /**
3049  * ring_buffer_record_off - stop all writes into the buffer
3050  * @buffer: The ring buffer to stop writes to.
3051  *
3052  * This prevents all writes to the buffer. Any attempt to write
3053  * to the buffer after this will fail and return NULL.
3054  *
3055  * This is different from ring_buffer_record_disable() as
3056  * it works like an on/off switch, whereas the disable() version
3057  * must be paired with an enable().
3058  */
3059 void ring_buffer_record_off(struct ring_buffer *buffer)
3060 {
3061 	unsigned int rd;
3062 	unsigned int new_rd;
3063 
3064 	do {
3065 		rd = atomic_read(&buffer->record_disabled);
3066 		new_rd = rd | RB_BUFFER_OFF;
3067 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3068 }
3069 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3070 
3071 /**
3072  * ring_buffer_record_on - restart writes into the buffer
3073  * @buffer: The ring buffer to start writes to.
3074  *
3075  * This enables all writes to the buffer that was disabled by
3076  * ring_buffer_record_off().
3077  *
3078  * This is different from ring_buffer_record_enable() as
3079  * it works like an on/off switch, whereas the enable() version
3080  * must be paired with a disable().
3081  */
3082 void ring_buffer_record_on(struct ring_buffer *buffer)
3083 {
3084 	unsigned int rd;
3085 	unsigned int new_rd;
3086 
3087 	do {
3088 		rd = atomic_read(&buffer->record_disabled);
3089 		new_rd = rd & ~RB_BUFFER_OFF;
3090 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3091 }
3092 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
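
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * unlike the nesting disable()/enable() pair, off()/on() act as an
 * idempotent switch.
 */
static inline void example_switch(struct ring_buffer *buffer)
{
	ring_buffer_record_off(buffer);
	ring_buffer_record_off(buffer);	/* still just "off", no nesting */
	ring_buffer_record_on(buffer);	/* a single on() resumes writes */
}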
3093 
3094 /**
3095  * ring_buffer_record_is_on - return true if the ring buffer can write
3096  * @buffer: The ring buffer to see if write is enabled
3097  *
3098  * Returns true if the ring buffer is in a state where it accepts writes.
3099  */
3100 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3101 {
3102 	return !atomic_read(&buffer->record_disabled);
3103 }
3104 
3105 /**
3106  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3107  * @buffer: The ring buffer to stop writes to.
3108  * @cpu: The CPU buffer to stop
3109  *
3110  * This prevents all writes to the buffer. Any attempt to write
3111  * to the buffer after this will fail and return NULL.
3112  *
3113  * The caller should call synchronize_sched() after this.
3114  */
3115 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3116 {
3117 	struct ring_buffer_per_cpu *cpu_buffer;
3118 
3119 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3120 		return;
3121 
3122 	cpu_buffer = buffer->buffers[cpu];
3123 	atomic_inc(&cpu_buffer->record_disabled);
3124 }
3125 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3126 
3127 /**
3128  * ring_buffer_record_enable_cpu - enable writes to the buffer
3129  * @buffer: The ring buffer to enable writes
3130  * @cpu: The CPU to enable.
3131  *
3132  * Note, multiple disables will need the same number of enables
3133  * to truly enable the writing (much like preempt_disable).
3134  */
3135 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3136 {
3137 	struct ring_buffer_per_cpu *cpu_buffer;
3138 
3139 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3140 		return;
3141 
3142 	cpu_buffer = buffer->buffers[cpu];
3143 	atomic_dec(&cpu_buffer->record_disabled);
3144 }
3145 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3146 
3147 /*
3148  * The total number of entries in the ring buffer is the running counter
3149  * of entries entered into the ring buffer, minus the sum of
3150  * the entries read from the ring buffer and the number of
3151  * entries that were overwritten.
3152  */
3153 static inline unsigned long
3154 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3155 {
3156 	return local_read(&cpu_buffer->entries) -
3157 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3158 }
3159 
3160 /**
3161  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3162  * @buffer: The ring buffer
3163  * @cpu: The per CPU buffer to read from.
3164  */
3165 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3166 {
3167 	unsigned long flags;
3168 	struct ring_buffer_per_cpu *cpu_buffer;
3169 	struct buffer_page *bpage;
3170 	u64 ret = 0;
3171 
3172 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3173 		return 0;
3174 
3175 	cpu_buffer = buffer->buffers[cpu];
3176 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3177 	/*
3178 	 * If the tail is on the reader_page, the oldest time stamp is on
3179 	 * the reader page.
3180 	 */
3181 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3182 		bpage = cpu_buffer->reader_page;
3183 	else
3184 		bpage = rb_set_head_page(cpu_buffer);
3185 	if (bpage)
3186 		ret = bpage->page->time_stamp;
3187 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3188 
3189 	return ret;
3190 }
3191 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3192 
3193 /**
3194  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3195  * @buffer: The ring buffer
3196  * @cpu: The per CPU buffer to read from.
3197  */
3198 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3199 {
3200 	struct ring_buffer_per_cpu *cpu_buffer;
3201 	unsigned long ret;
3202 
3203 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3204 		return 0;
3205 
3206 	cpu_buffer = buffer->buffers[cpu];
3207 	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3208 
3209 	return ret;
3210 }
3211 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3212 
3213 /**
3214  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3215  * @buffer: The ring buffer
3216  * @cpu: The per CPU buffer to get the entries from.
3217  */
3218 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3219 {
3220 	struct ring_buffer_per_cpu *cpu_buffer;
3221 
3222 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3223 		return 0;
3224 
3225 	cpu_buffer = buffer->buffers[cpu];
3226 
3227 	return rb_num_of_entries(cpu_buffer);
3228 }
3229 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3230 
3231 /**
3232  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3233  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3234  * @buffer: The ring buffer
3235  * @cpu: The per CPU buffer to get the number of overruns from
3236  */
3237 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3238 {
3239 	struct ring_buffer_per_cpu *cpu_buffer;
3240 	unsigned long ret;
3241 
3242 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3243 		return 0;
3244 
3245 	cpu_buffer = buffer->buffers[cpu];
3246 	ret = local_read(&cpu_buffer->overrun);
3247 
3248 	return ret;
3249 }
3250 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3251 
3252 /**
3253  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3254  * commits failing due to the buffer wrapping around while there are uncommitted
3255  * events, such as during an interrupt storm.
3256  * @buffer: The ring buffer
3257  * @cpu: The per CPU buffer to get the number of overruns from
3258  */
3259 unsigned long
3260 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3261 {
3262 	struct ring_buffer_per_cpu *cpu_buffer;
3263 	unsigned long ret;
3264 
3265 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3266 		return 0;
3267 
3268 	cpu_buffer = buffer->buffers[cpu];
3269 	ret = local_read(&cpu_buffer->commit_overrun);
3270 
3271 	return ret;
3272 }
3273 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3274 
3275 /**
3276  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3277  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3278  * @buffer: The ring buffer
3279  * @cpu: The per CPU buffer to get the number of overruns from
3280  */
3281 unsigned long
3282 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3283 {
3284 	struct ring_buffer_per_cpu *cpu_buffer;
3285 	unsigned long ret;
3286 
3287 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3288 		return 0;
3289 
3290 	cpu_buffer = buffer->buffers[cpu];
3291 	ret = local_read(&cpu_buffer->dropped_events);
3292 
3293 	return ret;
3294 }
3295 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3296 
3297 /**
3298  * ring_buffer_read_events_cpu - get the number of events successfully read
3299  * @buffer: The ring buffer
3300  * @cpu: The per CPU buffer to get the number of events read
3301  */
3302 unsigned long
3303 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3304 {
3305 	struct ring_buffer_per_cpu *cpu_buffer;
3306 
3307 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3308 		return 0;
3309 
3310 	cpu_buffer = buffer->buffers[cpu];
3311 	return cpu_buffer->read;
3312 }
3313 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3314 
3315 /**
3316  * ring_buffer_entries - get the number of entries in a buffer
3317  * @buffer: The ring buffer
3318  *
3319  * Returns the total number of entries in the ring buffer
3320  * (all CPU entries)
3321  */
3322 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3323 {
3324 	struct ring_buffer_per_cpu *cpu_buffer;
3325 	unsigned long entries = 0;
3326 	int cpu;
3327 
3328 	/* if you care about this being correct, lock the buffer */
3329 	for_each_buffer_cpu(buffer, cpu) {
3330 		cpu_buffer = buffer->buffers[cpu];
3331 		entries += rb_num_of_entries(cpu_buffer);
3332 	}
3333 
3334 	return entries;
3335 }
3336 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3337 
3338 /**
3339  * ring_buffer_overruns - get the number of overruns in buffer
3340  * @buffer: The ring buffer
3341  *
3342  * Returns the total number of overruns in the ring buffer
3343  * (all CPU entries)
3344  */
3345 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3346 {
3347 	struct ring_buffer_per_cpu *cpu_buffer;
3348 	unsigned long overruns = 0;
3349 	int cpu;
3350 
3351 	/* if you care about this being correct, lock the buffer */
3352 	for_each_buffer_cpu(buffer, cpu) {
3353 		cpu_buffer = buffer->buffers[cpu];
3354 		overruns += local_read(&cpu_buffer->overrun);
3355 	}
3356 
3357 	return overruns;
3358 }
3359 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
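
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * per-cpu statistics dump built from the accessors above.
 */
static inline void example_dump_stats(struct ring_buffer *buffer, int cpu)
{
	pr_info("cpu%d: %lu entries, %lu overruns, %lu bytes unread\n", cpu,
		ring_buffer_entries_cpu(buffer, cpu),
		ring_buffer_overrun_cpu(buffer, cpu),
		ring_buffer_bytes_cpu(buffer, cpu));
}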
3360 
3361 static void rb_iter_reset(struct ring_buffer_iter *iter)
3362 {
3363 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3364 
3365 	/* Iterator usage is expected to have record disabled */
3366 	/* Iterator usage is expected to have recording disabled */
3367 	iter->head = cpu_buffer->reader_page->read;
3368 
3369 	iter->cache_reader_page = iter->head_page;
3370 	iter->cache_read = cpu_buffer->read;
3371 
3372 	if (iter->head)
3373 		iter->read_stamp = cpu_buffer->read_stamp;
3374 	else
3375 		iter->read_stamp = iter->head_page->page->time_stamp;
3376 }
3377 
3378 /**
3379  * ring_buffer_iter_reset - reset an iterator
3380  * @iter: The iterator to reset
3381  *
3382  * Resets the iterator, so that it will start from the beginning
3383  * again.
3384  */
3385 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3386 {
3387 	struct ring_buffer_per_cpu *cpu_buffer;
3388 	unsigned long flags;
3389 
3390 	if (!iter)
3391 		return;
3392 
3393 	cpu_buffer = iter->cpu_buffer;
3394 
3395 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3396 	rb_iter_reset(iter);
3397 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3398 }
3399 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3400 
3401 /**
3402  * ring_buffer_iter_empty - check if an iterator has no more to read
3403  * @iter: The iterator to check
3404  */
3405 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3406 {
3407 	struct ring_buffer_per_cpu *cpu_buffer;
3408 	struct buffer_page *reader;
3409 	struct buffer_page *head_page;
3410 	struct buffer_page *commit_page;
3411 	unsigned commit;
3412 
3413 	cpu_buffer = iter->cpu_buffer;
3414 
3415 	/* Remember, trace recording is off when the iterator is in use */
3416 	reader = cpu_buffer->reader_page;
3417 	head_page = cpu_buffer->head_page;
3418 	commit_page = cpu_buffer->commit_page;
3419 	commit = rb_page_commit(commit_page);
3420 
3421 	return ((iter->head_page == commit_page && iter->head == commit) ||
3422 		(iter->head_page == reader && commit_page == head_page &&
3423 		 head_page->read == commit &&
3424 		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3425 }
3426 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
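
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * rewinding an iterator and checking whether anything is left to read.
 */
static inline int example_iter_rewind(struct ring_buffer_iter *iter)
{
	ring_buffer_iter_reset(iter);
	return !ring_buffer_iter_empty(iter);	/* 1 if events remain */
}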
3427 
3428 static void
3429 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3430 		     struct ring_buffer_event *event)
3431 {
3432 	u64 delta;
3433 
3434 	switch (event->type_len) {
3435 	case RINGBUF_TYPE_PADDING:
3436 		return;
3437 
3438 	case RINGBUF_TYPE_TIME_EXTEND:
3439 		delta = event->array[0];
3440 		delta <<= TS_SHIFT;
3441 		delta += event->time_delta;
3442 		cpu_buffer->read_stamp += delta;
3443 		return;
3444 
3445 	case RINGBUF_TYPE_TIME_STAMP:
3446 		/* FIXME: not implemented */
3447 		return;
3448 
3449 	case RINGBUF_TYPE_DATA:
3450 		cpu_buffer->read_stamp += event->time_delta;
3451 		return;
3452 
3453 	default:
3454 		BUG();
3455 	}
3456 	return;
3457 }
3458 
3459 static void
3460 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3461 			  struct ring_buffer_event *event)
3462 {
3463 	u64 delta;
3464 
3465 	switch (event->type_len) {
3466 	case RINGBUF_TYPE_PADDING:
3467 		return;
3468 
3469 	case RINGBUF_TYPE_TIME_EXTEND:
3470 		delta = event->array[0];
3471 		delta <<= TS_SHIFT;
3472 		delta += event->time_delta;
3473 		iter->read_stamp += delta;
3474 		return;
3475 
3476 	case RINGBUF_TYPE_TIME_STAMP:
3477 		/* FIXME: not implemented */
3478 		return;
3479 
3480 	case RINGBUF_TYPE_DATA:
3481 		iter->read_stamp += event->time_delta;
3482 		return;
3483 
3484 	default:
3485 		BUG();
3486 	}
3487 	return;
3488 }
3489 
3490 static struct buffer_page *
3491 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3492 {
3493 	struct buffer_page *reader = NULL;
3494 	unsigned long overwrite;
3495 	unsigned long flags;
3496 	int nr_loops = 0;
3497 	int ret;
3498 
3499 	local_irq_save(flags);
3500 	arch_spin_lock(&cpu_buffer->lock);
3501 
3502  again:
3503 	/*
3504 	 * This should normally only loop twice. But because the
3505 	 * start of the reader inserts an empty page, it causes
3506 	 * a case where we will loop three times. There should be no
3507 	 * reason to loop four times (that I know of).
3508 	 */
3509 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3510 		reader = NULL;
3511 		goto out;
3512 	}
3513 
3514 	reader = cpu_buffer->reader_page;
3515 
3516 	/* If there's more to read, return this page */
3517 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3518 		goto out;
3519 
3520 	/* Never should we have an index greater than the size */
3521 	if (RB_WARN_ON(cpu_buffer,
3522 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3523 		goto out;
3524 
3525 	/* check if we caught up to the tail */
3526 	reader = NULL;
3527 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3528 		goto out;
3529 
3530 	/* Don't bother swapping if the ring buffer is empty */
3531 	if (rb_num_of_entries(cpu_buffer) == 0)
3532 		goto out;
3533 
3534 	/*
3535 	 * Reset the reader page to size zero.
3536 	 */
3537 	local_set(&cpu_buffer->reader_page->write, 0);
3538 	local_set(&cpu_buffer->reader_page->entries, 0);
3539 	local_set(&cpu_buffer->reader_page->page->commit, 0);
3540 	cpu_buffer->reader_page->real_end = 0;
3541 
3542  spin:
3543 	/*
3544 	 * Splice the empty reader page into the list around the head.
3545 	 */
3546 	reader = rb_set_head_page(cpu_buffer);
3547 	if (!reader)
3548 		goto out;
3549 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3550 	cpu_buffer->reader_page->list.prev = reader->list.prev;
3551 
3552 	/*
3553 	 * cpu_buffer->pages just needs to point to the buffer; it
3554 	 * has no specific buffer page to point to. Let's move it out
3555 	 * of our way so we don't accidentally swap it.
3556 	 */
3557 	cpu_buffer->pages = reader->list.prev;
3558 
3559 	/* The reader page will be pointing to the new head */
3560 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3561 
3562 	/*
3563 	 * We want to make sure we read the overruns after we set up our
3564 	 * pointers to the next object. The writer side does a
3565 	 * cmpxchg to cross pages which acts as the mb on the writer
3566 	 * side. Note, the reader will constantly fail the swap
3567 	 * while the writer is updating the pointers, so this
3568 	 * guarantees that the overwrite recorded here is the one we
3569 	 * want to compare with the last_overrun.
3570 	 */
3571 	smp_mb();
3572 	overwrite = local_read(&(cpu_buffer->overrun));
3573 
3574 	/*
3575 	 * Here's the tricky part.
3576 	 *
3577 	 * We need to move the pointer past the header page.
3578 	 * But we can only do that if a writer is not currently
3579 	 * moving it. The page before the header page has the
3580 	 * flag bit '1' set if it is pointing to the page we want,
3581 	 * but if the writer is in the process of moving it
3582 	 * then it will be '2', or '0' if it has already moved.
3583 	 */
3584 
3585 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3586 
3587 	/*
3588 	 * If we did not convert it, then we must try again.
3589 	 */
3590 	if (!ret)
3591 		goto spin;
3592 
3593 	/*
3594 	 * Yeah! We succeeded in replacing the page.
3595 	 *
3596 	 * Now make the new head point back to the reader page.
3597 	 */
3598 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3599 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3600 
3601 	/* Finally update the reader page to the new head */
3602 	cpu_buffer->reader_page = reader;
3603 	cpu_buffer->reader_page->read = 0;
3604 
3605 	if (overwrite != cpu_buffer->last_overrun) {
3606 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3607 		cpu_buffer->last_overrun = overwrite;
3608 	}
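	/*
	 * For example: if the writer's overrun count went from 10 to 13
	 * since the last reader page swap, then three events were
	 * overwritten, and lost_events will report 3 to the next read.
	 */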
3609 
3610 	goto again;
3611 
3612  out:
3613 	/* Update the read_stamp on the first event */
3614 	if (reader && reader->read == 0)
3615 		cpu_buffer->read_stamp = reader->page->time_stamp;
3616 
3617 	arch_spin_unlock(&cpu_buffer->lock);
3618 	local_irq_restore(flags);
3619 
3620 	return reader;
3621 }
3622 
3623 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3624 {
3625 	struct ring_buffer_event *event;
3626 	struct buffer_page *reader;
3627 	unsigned length;
3628 
3629 	reader = rb_get_reader_page(cpu_buffer);
3630 
3631 	/* This function should not be called when buffer is empty */
3632 	if (RB_WARN_ON(cpu_buffer, !reader))
3633 		return;
3634 
3635 	event = rb_reader_event(cpu_buffer);
3636 
3637 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3638 		cpu_buffer->read++;
3639 
3640 	rb_update_read_stamp(cpu_buffer, event);
3641 
3642 	length = rb_event_length(event);
3643 	cpu_buffer->reader_page->read += length;
3644 }
3645 
3646 static void rb_advance_iter(struct ring_buffer_iter *iter)
3647 {
3648 	struct ring_buffer_per_cpu *cpu_buffer;
3649 	struct ring_buffer_event *event;
3650 	unsigned length;
3651 
3652 	cpu_buffer = iter->cpu_buffer;
3653 
3654 	/*
3655 	 * Check if we are at the end of the buffer.
3656 	 */
3657 	if (iter->head >= rb_page_size(iter->head_page)) {
3658 		/* discarded commits can make the page empty */
3659 		if (iter->head_page == cpu_buffer->commit_page)
3660 			return;
3661 		rb_inc_iter(iter);
3662 		return;
3663 	}
3664 
3665 	event = rb_iter_head_event(iter);
3666 
3667 	length = rb_event_length(event);
3668 
3669 	/*
3670 	 * This should not be called to advance the header if we are
3671 	 * at the tail of the buffer.
3672 	 */
3673 	if (RB_WARN_ON(cpu_buffer,
3674 		       (iter->head_page == cpu_buffer->commit_page) &&
3675 		       (iter->head + length > rb_commit_index(cpu_buffer))))
3676 		return;
3677 
3678 	rb_update_iter_read_stamp(iter, event);
3679 
3680 	iter->head += length;
3681 
3682 	/* check for end of page padding */
3683 	if ((iter->head >= rb_page_size(iter->head_page)) &&
3684 	    (iter->head_page != cpu_buffer->commit_page))
3685 		rb_inc_iter(iter);
3686 }
3687 
3688 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3689 {
3690 	return cpu_buffer->lost_events;
3691 }
3692 
3693 static struct ring_buffer_event *
3694 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3695 	       unsigned long *lost_events)
3696 {
3697 	struct ring_buffer_event *event;
3698 	struct buffer_page *reader;
3699 	int nr_loops = 0;
3700 
3701  again:
3702 	/*
3703 	 * We repeat when a time extend is encountered.
3704 	 * Since the time extend is always attached to a data event,
3705 	 * we should never loop more than once.
3706 	 * (We never hit the following condition more than twice).
3707 	 */
3708 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3709 		return NULL;
3710 
3711 	reader = rb_get_reader_page(cpu_buffer);
3712 	if (!reader)
3713 		return NULL;
3714 
3715 	event = rb_reader_event(cpu_buffer);
3716 
3717 	switch (event->type_len) {
3718 	case RINGBUF_TYPE_PADDING:
3719 		if (rb_null_event(event))
3720 			RB_WARN_ON(cpu_buffer, 1);
3721 		/*
3722 		 * Because the writer could be discarding every
3723 		 * event it creates (which would probably be bad),
3724 		 * if we were to go back to "again" then we may never
3725 		 * catch up, and will trigger the warn on, or lock
3726 		 * the box. Return the padding, and we will release
3727 		 * the current locks, and try again.
3728 		 */
3729 		return event;
3730 
3731 	case RINGBUF_TYPE_TIME_EXTEND:
3732 		/* Internal data, OK to advance */
3733 		rb_advance_reader(cpu_buffer);
3734 		goto again;
3735 
3736 	case RINGBUF_TYPE_TIME_STAMP:
3737 		/* FIXME: not implemented */
3738 		rb_advance_reader(cpu_buffer);
3739 		goto again;
3740 
3741 	case RINGBUF_TYPE_DATA:
3742 		if (ts) {
3743 			*ts = cpu_buffer->read_stamp + event->time_delta;
3744 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3745 							 cpu_buffer->cpu, ts);
3746 		}
3747 		if (lost_events)
3748 			*lost_events = rb_lost_events(cpu_buffer);
3749 		return event;
3750 
3751 	default:
3752 		BUG();
3753 	}
3754 
3755 	return NULL;
3756 }
3757 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3758 
3759 static struct ring_buffer_event *
3760 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3761 {
3762 	struct ring_buffer *buffer;
3763 	struct ring_buffer_per_cpu *cpu_buffer;
3764 	struct ring_buffer_event *event;
3765 	int nr_loops = 0;
3766 
3767 	cpu_buffer = iter->cpu_buffer;
3768 	buffer = cpu_buffer->buffer;
3769 
3770 	/*
3771 	 * Check if someone performed a consuming read to
3772 	 * the buffer. A consuming read invalidates the iterator
3773 	 * and we need to reset the iterator in this case.
3774 	 */
3775 	if (unlikely(iter->cache_read != cpu_buffer->read ||
3776 		     iter->cache_reader_page != cpu_buffer->reader_page))
3777 		rb_iter_reset(iter);
3778 
3779  again:
3780 	if (ring_buffer_iter_empty(iter))
3781 		return NULL;
3782 
3783 	/*
3784 	 * We repeat when a time extend is encountered or we hit
3785 	 * the end of the page. Since the time extend is always attached
3786 	 * to a data event, we should never loop more than three times.
3787 	 * Once for going to the next page, once on a time extend, and
3788 	 * finally once to get the event.
3789 	 * (We never hit the following condition more than thrice).
3790 	 */
3791 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3792 		return NULL;
3793 
3794 	if (rb_per_cpu_empty(cpu_buffer))
3795 		return NULL;
3796 
3797 	if (iter->head >= rb_page_size(iter->head_page)) {
3798 		rb_inc_iter(iter);
3799 		goto again;
3800 	}
3801 
3802 	event = rb_iter_head_event(iter);
3803 
3804 	switch (event->type_len) {
3805 	case RINGBUF_TYPE_PADDING:
3806 		if (rb_null_event(event)) {
3807 			rb_inc_iter(iter);
3808 			goto again;
3809 		}
3810 		rb_advance_iter(iter);
3811 		return event;
3812 
3813 	case RINGBUF_TYPE_TIME_EXTEND:
3814 		/* Internal data, OK to advance */
3815 		rb_advance_iter(iter);
3816 		goto again;
3817 
3818 	case RINGBUF_TYPE_TIME_STAMP:
3819 		/* FIXME: not implemented */
3820 		rb_advance_iter(iter);
3821 		goto again;
3822 
3823 	case RINGBUF_TYPE_DATA:
3824 		if (ts) {
3825 			*ts = iter->read_stamp + event->time_delta;
3826 			ring_buffer_normalize_time_stamp(buffer,
3827 							 cpu_buffer->cpu, ts);
3828 		}
3829 		return event;
3830 
3831 	default:
3832 		BUG();
3833 	}
3834 
3835 	return NULL;
3836 }
3837 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3838 
3839 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3840 {
3841 	if (likely(!in_nmi())) {
3842 		raw_spin_lock(&cpu_buffer->reader_lock);
3843 		return true;
3844 	}
3845 
3846 	/*
3847 	 * If an NMI die dump reads out the content of the ring buffer,
3848 	 * trylock must be used to prevent a deadlock if the NMI
3849 	 * preempted a task that holds the ring buffer locks. If
3850 	 * we get the lock then all is fine; if not, we continue
3851 	 * to do the read, but this can corrupt the ring buffer,
3852 	 * so it must be permanently disabled from future writes.
3853 	 * Reading from an NMI is a one-shot deal.
3854 	 */
3855 	if (raw_spin_trylock(&cpu_buffer->reader_lock))
3856 		return true;
3857 
3858 	/* Continue without locking, but disable the ring buffer */
3859 	atomic_inc(&cpu_buffer->record_disabled);
3860 	return false;
3861 }
3862 
3863 static inline void
3864 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
3865 {
3866 	if (likely(locked))
3867 		raw_spin_unlock(&cpu_buffer->reader_lock);
3868 	return;
3869 }
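/*
 * The expected pairing, as used by the read paths below. The return
 * value of rb_reader_lock() records whether the lock was actually
 * taken, so an NMI reader that failed the trylock will not unlock:
 *
 *	local_irq_save(flags);
 *	dolock = rb_reader_lock(cpu_buffer);
 *	... peek or consume events ...
 *	rb_reader_unlock(cpu_buffer, dolock);
 *	local_irq_restore(flags);
 */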
3870 
3871 /**
3872  * ring_buffer_peek - peek at the next event to be read
3873  * @buffer: The ring buffer to read
3874  * @cpu: The cpu to peek at
3875  * @ts: The timestamp counter of this event.
3876  * @lost_events: a variable to store if events were lost (may be NULL)
3877  *
3878  * This will return the event that will be read next, but does
3879  * not consume the data.
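 *
 * A sketch of typical use (handle_event() is a placeholder, not a
 * kernel function):
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		handle_event(ring_buffer_event_data(event), ts);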
3880  */
3881 struct ring_buffer_event *
3882 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3883 		 unsigned long *lost_events)
3884 {
3885 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3886 	struct ring_buffer_event *event;
3887 	unsigned long flags;
3888 	bool dolock;
3889 
3890 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3891 		return NULL;
3892 
3893  again:
3894 	local_irq_save(flags);
3895 	dolock = rb_reader_lock(cpu_buffer);
3896 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3897 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3898 		rb_advance_reader(cpu_buffer);
3899 	rb_reader_unlock(cpu_buffer, dolock);
3900 	local_irq_restore(flags);
3901 
3902 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3903 		goto again;
3904 
3905 	return event;
3906 }
3907 
3908 /**
3909  * ring_buffer_iter_peek - peek at the next event to be read
3910  * @iter: The ring buffer iterator
3911  * @ts: The timestamp counter of this event.
3912  *
3913  * This will return the event that will be read next, but does
3914  * not increment the iterator.
3915  */
3916 struct ring_buffer_event *
3917 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3918 {
3919 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3920 	struct ring_buffer_event *event;
3921 	unsigned long flags;
3922 
3923  again:
3924 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3925 	event = rb_iter_peek(iter, ts);
3926 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3927 
3928 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3929 		goto again;
3930 
3931 	return event;
3932 }
3933 
3934 /**
3935  * ring_buffer_consume - return an event and consume it
3936  * @buffer: The ring buffer to get the next event from
3937  * @cpu: the cpu to read the buffer from
3938  * @ts: a variable to store the timestamp (may be NULL)
3939  * @lost_events: a variable to store if events were lost (may be NULL)
3940  *
3941  * Returns the next event in the ring buffer, and that event is consumed.
3942  * Meaning, that sequential reads will keep returning a different event,
3943  * and eventually empty the ring buffer if the producer is slower.
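 *
 * A sketch of a typical drain loop (process() is a placeholder):
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event), ts, lost);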
3944  */
3945 struct ring_buffer_event *
3946 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3947 		    unsigned long *lost_events)
3948 {
3949 	struct ring_buffer_per_cpu *cpu_buffer;
3950 	struct ring_buffer_event *event = NULL;
3951 	unsigned long flags;
3952 	bool dolock;
3953 
3954  again:
3955 	/* might be called in atomic */
3956 	preempt_disable();
3957 
3958 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3959 		goto out;
3960 
3961 	cpu_buffer = buffer->buffers[cpu];
3962 	local_irq_save(flags);
3963 	dolock = rb_reader_lock(cpu_buffer);
3964 
3965 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3966 	if (event) {
3967 		cpu_buffer->lost_events = 0;
3968 		rb_advance_reader(cpu_buffer);
3969 	}
3970 
3971 	rb_reader_unlock(cpu_buffer, dolock);
3972 	local_irq_restore(flags);
3973 
3974  out:
3975 	preempt_enable();
3976 
3977 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3978 		goto again;
3979 
3980 	return event;
3981 }
3982 EXPORT_SYMBOL_GPL(ring_buffer_consume);
3983 
3984 /**
3985  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3986  * @buffer: The ring buffer to read from
3987  * @cpu: The cpu buffer to iterate over
3988  *
3989  * This performs the initial preparations necessary to iterate
3990  * through the buffer.  Memory is allocated, buffer recording
3991  * is disabled, and the iterator pointer is returned to the caller.
3992  *
3993  * Disabling buffer recording prevents the reading from being
3994  * corrupted. This is not a consuming read, so a producer is not
3995  * expected.
3996  *
3997  * After a sequence of ring_buffer_read_prepare calls, the user is
3998  * expected to make at least one call to ring_buffer_read_prepare_sync.
3999  * Afterwards, ring_buffer_read_start is invoked to get things going
4000  * for real.
4001  *
4002  * This overall must be paired with ring_buffer_read_finish.
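 *
 * A sketch of the whole sequence (error handling elided; process()
 * is a placeholder):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);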
4003  */
4004 struct ring_buffer_iter *
4005 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4006 {
4007 	struct ring_buffer_per_cpu *cpu_buffer;
4008 	struct ring_buffer_iter *iter;
4009 
4010 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4011 		return NULL;
4012 
4013 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4014 	if (!iter)
4015 		return NULL;
4016 
4017 	cpu_buffer = buffer->buffers[cpu];
4018 
4019 	iter->cpu_buffer = cpu_buffer;
4020 
4021 	atomic_inc(&buffer->resize_disabled);
4022 	atomic_inc(&cpu_buffer->record_disabled);
4023 
4024 	return iter;
4025 }
4026 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4027 
4028 /**
4029  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4030  *
4031  * All previously invoked ring_buffer_read_prepare calls to prepare
4032  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4033  * calls on those iterators are allowed.
4034  */
4035 void
4036 ring_buffer_read_prepare_sync(void)
4037 {
4038 	synchronize_sched();
4039 }
4040 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4041 
4042 /**
4043  * ring_buffer_read_start - start a non consuming read of the buffer
4044  * @iter: The iterator returned by ring_buffer_read_prepare
4045  *
4046  * This finalizes the startup of an iteration through the buffer.
4047  * The iterator comes from a call to ring_buffer_read_prepare and
4048  * an intervening ring_buffer_read_prepare_sync must have been
4049  * performed.
4050  *
4051  * Must be paired with ring_buffer_read_finish.
4052  */
4053 void
4054 ring_buffer_read_start(struct ring_buffer_iter *iter)
4055 {
4056 	struct ring_buffer_per_cpu *cpu_buffer;
4057 	unsigned long flags;
4058 
4059 	if (!iter)
4060 		return;
4061 
4062 	cpu_buffer = iter->cpu_buffer;
4063 
4064 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4065 	arch_spin_lock(&cpu_buffer->lock);
4066 	rb_iter_reset(iter);
4067 	arch_spin_unlock(&cpu_buffer->lock);
4068 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4069 }
4070 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4071 
4072 /**
4073  * ring_buffer_read_finish - finish reading the iterator of the buffer
4074  * @iter: The iterator retrieved by ring_buffer_read_prepare
4075  *
4076  * This re-enables the recording to the buffer, and frees the
4077  * iterator.
4078  */
4079 void
4080 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4081 {
4082 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4083 	unsigned long flags;
4084 
4085 	/*
4086 	 * Ring buffer is disabled from recording, here's a good place
4087 	 * to check the integrity of the ring buffer.
4088 	 * Must prevent readers from trying to read, as the check
4089 	 * clears the HEAD page and readers require it.
4090 	 */
4091 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4092 	rb_check_pages(cpu_buffer);
4093 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4094 
4095 	atomic_dec(&cpu_buffer->record_disabled);
4096 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4097 	kfree(iter);
4098 }
4099 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4100 
4101 /**
4102  * ring_buffer_read - read the next item in the ring buffer by the iterator
4103  * @iter: The ring buffer iterator
4104  * @ts: The time stamp of the event read.
4105  *
4106  * This reads the next event in the ring buffer and increments the iterator.
4107  */
4108 struct ring_buffer_event *
4109 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4110 {
4111 	struct ring_buffer_event *event;
4112 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4113 	unsigned long flags;
4114 
4115 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4116  again:
4117 	event = rb_iter_peek(iter, ts);
4118 	if (!event)
4119 		goto out;
4120 
4121 	if (event->type_len == RINGBUF_TYPE_PADDING)
4122 		goto again;
4123 
4124 	rb_advance_iter(iter);
4125  out:
4126 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4127 
4128 	return event;
4129 }
4130 EXPORT_SYMBOL_GPL(ring_buffer_read);
4131 
4132 /**
4133  * ring_buffer_size - return the size of the ring buffer (in bytes)
4134  * @buffer: The ring buffer.
 * @cpu: The per CPU buffer to get the size of.
4135  */
4136 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4137 {
4138 	/*
4139 	 * Earlier, this method returned
4140 	 *	BUF_PAGE_SIZE * buffer->nr_pages
4141 	 * Since the nr_pages field is now removed, we have converted this to
4142 	 * return the per cpu buffer value.
4143 	 */
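	/*
	 * Note: BUF_PAGE_SIZE is the data area of a page, i.e. PAGE_SIZE
	 * minus the struct buffer_data_page header. On a typical 64-bit
	 * build with 4K pages that header is 16 bytes, so each buffer
	 * page accounts for 4080 bytes here.
	 */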
4144 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4145 		return 0;
4146 
4147 	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4148 }
4149 EXPORT_SYMBOL_GPL(ring_buffer_size);
4150 
4151 static void
4152 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4153 {
4154 	rb_head_page_deactivate(cpu_buffer);
4155 
4156 	cpu_buffer->head_page
4157 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4158 	local_set(&cpu_buffer->head_page->write, 0);
4159 	local_set(&cpu_buffer->head_page->entries, 0);
4160 	local_set(&cpu_buffer->head_page->page->commit, 0);
4161 
4162 	cpu_buffer->head_page->read = 0;
4163 
4164 	cpu_buffer->tail_page = cpu_buffer->head_page;
4165 	cpu_buffer->commit_page = cpu_buffer->head_page;
4166 
4167 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4168 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4169 	local_set(&cpu_buffer->reader_page->write, 0);
4170 	local_set(&cpu_buffer->reader_page->entries, 0);
4171 	local_set(&cpu_buffer->reader_page->page->commit, 0);
4172 	cpu_buffer->reader_page->read = 0;
4173 
4174 	local_set(&cpu_buffer->entries_bytes, 0);
4175 	local_set(&cpu_buffer->overrun, 0);
4176 	local_set(&cpu_buffer->commit_overrun, 0);
4177 	local_set(&cpu_buffer->dropped_events, 0);
4178 	local_set(&cpu_buffer->entries, 0);
4179 	local_set(&cpu_buffer->committing, 0);
4180 	local_set(&cpu_buffer->commits, 0);
4181 	cpu_buffer->read = 0;
4182 	cpu_buffer->read_bytes = 0;
4183 
4184 	cpu_buffer->write_stamp = 0;
4185 	cpu_buffer->read_stamp = 0;
4186 
4187 	cpu_buffer->lost_events = 0;
4188 	cpu_buffer->last_overrun = 0;
4189 
4190 	rb_head_page_activate(cpu_buffer);
4191 }
4192 
4193 /**
4194  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4195  * @buffer: The ring buffer to reset a per cpu buffer of
4196  * @cpu: The CPU buffer to be reset
4197  */
4198 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4199 {
4200 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4201 	unsigned long flags;
4202 
4203 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4204 		return;
4205 
4206 	atomic_inc(&buffer->resize_disabled);
4207 	atomic_inc(&cpu_buffer->record_disabled);
4208 
4209 	/* Make sure all commits have finished */
4210 	synchronize_sched();
4211 
4212 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4213 
4214 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4215 		goto out;
4216 
4217 	arch_spin_lock(&cpu_buffer->lock);
4218 
4219 	rb_reset_cpu(cpu_buffer);
4220 
4221 	arch_spin_unlock(&cpu_buffer->lock);
4222 
4223  out:
4224 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4225 
4226 	atomic_dec(&cpu_buffer->record_disabled);
4227 	atomic_dec(&buffer->resize_disabled);
4228 }
4229 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4230 
4231 /**
4232  * ring_buffer_reset - reset a ring buffer
4233  * @buffer: The ring buffer to reset all cpu buffers
4234  */
4235 void ring_buffer_reset(struct ring_buffer *buffer)
4236 {
4237 	int cpu;
4238 
4239 	for_each_buffer_cpu(buffer, cpu)
4240 		ring_buffer_reset_cpu(buffer, cpu);
4241 }
4242 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4243 
4244 /**
4245  * ring_buffer_empty - is the ring buffer empty?
4246  * @buffer: The ring buffer to test
4247  */
4248 bool ring_buffer_empty(struct ring_buffer *buffer)
4249 {
4250 	struct ring_buffer_per_cpu *cpu_buffer;
4251 	unsigned long flags;
4252 	bool dolock;
4253 	int cpu;
4254 	int ret;
4255 
4256 	/* yes this is racy, but if you don't like the race, lock the buffer */
4257 	for_each_buffer_cpu(buffer, cpu) {
4258 		cpu_buffer = buffer->buffers[cpu];
4259 		local_irq_save(flags);
4260 		dolock = rb_reader_lock(cpu_buffer);
4261 		ret = rb_per_cpu_empty(cpu_buffer);
4262 		rb_reader_unlock(cpu_buffer, dolock);
4263 		local_irq_restore(flags);
4264 
4265 		if (!ret)
4266 			return false;
4267 	}
4268 
4269 	return true;
4270 }
4271 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4272 
4273 /**
4274  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4275  * @buffer: The ring buffer
4276  * @cpu: The CPU buffer to test
4277  */
4278 bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4279 {
4280 	struct ring_buffer_per_cpu *cpu_buffer;
4281 	unsigned long flags;
4282 	bool dolock;
4283 	int ret;
4284 
4285 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4286 		return true;
4287 
4288 	cpu_buffer = buffer->buffers[cpu];
4289 	local_irq_save(flags);
4290 	dolock = rb_reader_lock(cpu_buffer);
4291 	ret = rb_per_cpu_empty(cpu_buffer);
4292 	rb_reader_unlock(cpu_buffer, dolock);
4293 	local_irq_restore(flags);
4294 
4295 	return ret;
4296 }
4297 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4298 
4299 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4300 /**
4301  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4302  * @buffer_a: One buffer to swap with
4303  * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
4304  *
4305  * This function is useful for tracers that want to take a "snapshot"
4306  * of a CPU buffer and have another backup buffer lying around.
4307  * It is expected that the tracer handles the cpu buffer not being
4308  * used at the moment.
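 *
 * A sketch of snapshot-style use (buffer names are placeholders):
 *
 *	ret = ring_buffer_swap_cpu(snapshot_buffer, live_buffer, cpu);
 *	if (ret)
 *		return ret; (-EAGAIN: recording was disabled,
 *			     -EBUSY: a commit was still in flight)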
4309  */
4310 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4311 			 struct ring_buffer *buffer_b, int cpu)
4312 {
4313 	struct ring_buffer_per_cpu *cpu_buffer_a;
4314 	struct ring_buffer_per_cpu *cpu_buffer_b;
4315 	int ret = -EINVAL;
4316 
4317 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4318 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4319 		goto out;
4320 
4321 	cpu_buffer_a = buffer_a->buffers[cpu];
4322 	cpu_buffer_b = buffer_b->buffers[cpu];
4323 
4324 	/* At least make sure the two buffers are somewhat the same */
4325 	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4326 		goto out;
4327 
4328 	ret = -EAGAIN;
4329 
4330 	if (atomic_read(&buffer_a->record_disabled))
4331 		goto out;
4332 
4333 	if (atomic_read(&buffer_b->record_disabled))
4334 		goto out;
4335 
4336 	if (atomic_read(&cpu_buffer_a->record_disabled))
4337 		goto out;
4338 
4339 	if (atomic_read(&cpu_buffer_b->record_disabled))
4340 		goto out;
4341 
4342 	/*
4343 	 * We can't do a synchronize_sched here because this
4344 	 * function can be called in atomic context.
4345 	 * Normally this will be called from the same CPU as cpu.
4346 	 * If not it's up to the caller to protect this.
4347 	 */
4348 	atomic_inc(&cpu_buffer_a->record_disabled);
4349 	atomic_inc(&cpu_buffer_b->record_disabled);
4350 
4351 	ret = -EBUSY;
4352 	if (local_read(&cpu_buffer_a->committing))
4353 		goto out_dec;
4354 	if (local_read(&cpu_buffer_b->committing))
4355 		goto out_dec;
4356 
4357 	buffer_a->buffers[cpu] = cpu_buffer_b;
4358 	buffer_b->buffers[cpu] = cpu_buffer_a;
4359 
4360 	cpu_buffer_b->buffer = buffer_a;
4361 	cpu_buffer_a->buffer = buffer_b;
4362 
4363 	ret = 0;
4364 
4365 out_dec:
4366 	atomic_dec(&cpu_buffer_a->record_disabled);
4367 	atomic_dec(&cpu_buffer_b->record_disabled);
4368 out:
4369 	return ret;
4370 }
4371 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4372 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4373 
4374 /**
4375  * ring_buffer_alloc_read_page - allocate a page to read from buffer
4376  * @buffer: the buffer to allocate for.
4377  * @cpu: the cpu buffer to allocate.
4378  *
4379  * This function is used in conjunction with ring_buffer_read_page.
4380  * When reading a full page from the ring buffer, these functions
4381  * can be used to speed up the process. The calling function should
4382  * allocate a few pages first with this function. Then when it
4383  * needs to get pages from the ring buffer, it passes the result
4384  * of this function into ring_buffer_read_page, which will swap
4385  * the page that was allocated, with the read page of the buffer.
4386  *
4387  * Returns:
4388  *  The page allocated, or NULL on error.
4389  */
4390 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4391 {
4392 	struct buffer_data_page *bpage;
4393 	struct page *page;
4394 
4395 	page = alloc_pages_node(cpu_to_node(cpu),
4396 				GFP_KERNEL | __GFP_NORETRY, 0);
4397 	if (!page)
4398 		return NULL;
4399 
4400 	bpage = page_address(page);
4401 
4402 	rb_init_page(bpage);
4403 
4404 	return bpage;
4405 }
4406 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4407 
4408 /**
4409  * ring_buffer_free_read_page - free an allocated read page
4410  * @buffer: the buffer the page was allocated for
4411  * @data: the page to free
4412  *
4413  * Free a page allocated from ring_buffer_alloc_read_page.
4414  */
4415 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4416 {
4417 	free_page((unsigned long)data);
4418 }
4419 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4420 
4421 /**
4422  * ring_buffer_read_page - extract a page from the ring buffer
4423  * @buffer: buffer to extract from
4424  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4425  * @len: amount to extract
4426  * @cpu: the cpu of the buffer to extract
4427  * @full: should the extraction only happen when the page is full.
4428  *
4429  * This function will pull out a page from the ring buffer and consume it.
4430  * @data_page must be the address of the variable that was returned
4431  * from ring_buffer_alloc_read_page. This is because the page might be used
4432  * to swap with a page in the ring buffer.
4433  *
4434  * for example:
4435  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4436  *	if (!rpage)
4437  *		return error;
4438  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4439  *	if (ret >= 0)
4440  *		process_page(rpage, ret);
4441  *
4442  * When @full is set, the function will not succeed unless
4443  * the writer is off the reader page.
4444  *
4445  * Note: it is up to the calling functions to handle sleeps and wakeups.
4446  *  The ring buffer can be used anywhere in the kernel and can not
4447  *  blindly call wake_up. The layer that uses the ring buffer must be
4448  *  responsible for that.
4449  *
4450  * Returns:
4451  *  >=0 if data has been transferred, returns the offset of consumed data.
4452  *  <0 if no data has been transferred.
4453  */
4454 int ring_buffer_read_page(struct ring_buffer *buffer,
4455 			  void **data_page, size_t len, int cpu, int full)
4456 {
4457 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4458 	struct ring_buffer_event *event;
4459 	struct buffer_data_page *bpage;
4460 	struct buffer_page *reader;
4461 	unsigned long missed_events;
4462 	unsigned long flags;
4463 	unsigned int commit;
4464 	unsigned int read;
4465 	u64 save_timestamp;
4466 	int ret = -1;
4467 
4468 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4469 		goto out;
4470 
4471 	/*
4472 	 * If len is not big enough to hold the page header, then
4473 	 * we can not copy anything.
4474 	 */
4475 	if (len <= BUF_PAGE_HDR_SIZE)
4476 		goto out;
4477 
4478 	len -= BUF_PAGE_HDR_SIZE;
4479 
4480 	if (!data_page)
4481 		goto out;
4482 
4483 	bpage = *data_page;
4484 	if (!bpage)
4485 		goto out;
4486 
4487 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4488 
4489 	reader = rb_get_reader_page(cpu_buffer);
4490 	if (!reader)
4491 		goto out_unlock;
4492 
4493 	event = rb_reader_event(cpu_buffer);
4494 
4495 	read = reader->read;
4496 	commit = rb_page_commit(reader);
4497 
4498 	/* Check if any events were dropped */
4499 	missed_events = cpu_buffer->lost_events;
4500 
4501 	/*
4502 	 * If this page has been partially read or
4503 	 * if len is not big enough to read the rest of the page or
4504 	 * a writer is still on the page, then
4505 	 * we must copy the data from the page to the buffer.
4506 	 * Otherwise, we can simply swap the page with the one passed in.
4507 	 */
4508 	if (read || (len < (commit - read)) ||
4509 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4510 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4511 		unsigned int rpos = read;
4512 		unsigned int pos = 0;
4513 		unsigned int size;
4514 
4515 		if (full)
4516 			goto out_unlock;
4517 
4518 		if (len > (commit - read))
4519 			len = (commit - read);
4520 
4521 		/* Always keep the time extend and data together */
4522 		size = rb_event_ts_length(event);
4523 
4524 		if (len < size)
4525 			goto out_unlock;
4526 
4527 		/* save the current timestamp, since the user will need it */
4528 		save_timestamp = cpu_buffer->read_stamp;
4529 
4530 		/* Need to copy one event at a time */
4531 		do {
4532 			/*
4533 			 * We need the size of one event, because
4534 			 * rb_advance_reader() only advances by one event,
4535 			 * whereas rb_event_ts_length() may include the size
4536 			 * of one or two events. We have already ensured
4537 			 * there's enough space if this is a time extend.
			 */
4538 			size = rb_event_length(event);
4539 			memcpy(bpage->data + pos, rpage->data + rpos, size);
4540 
4541 			len -= size;
4542 
4543 			rb_advance_reader(cpu_buffer);
4544 			rpos = reader->read;
4545 			pos += size;
4546 
4547 			if (rpos >= commit)
4548 				break;
4549 
4550 			event = rb_reader_event(cpu_buffer);
4551 			/* Always keep the time extend and data together */
4552 			size = rb_event_ts_length(event);
4553 		} while (len >= size);
4554 
4555 		/* update bpage */
4556 		local_set(&bpage->commit, pos);
4557 		bpage->time_stamp = save_timestamp;
4558 
4559 		/* we copied everything to the beginning */
4560 		read = 0;
4561 	} else {
4562 		/* update the entry counter */
4563 		cpu_buffer->read += rb_page_entries(reader);
4564 		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4565 
4566 		/* swap the pages */
4567 		rb_init_page(bpage);
4568 		bpage = reader->page;
4569 		reader->page = *data_page;
4570 		local_set(&reader->write, 0);
4571 		local_set(&reader->entries, 0);
4572 		reader->read = 0;
4573 		*data_page = bpage;
4574 
4575 		/*
4576 		 * Use the real_end for the data size.
4577 		 * This gives us a chance to store the lost events
4578 		 * on the page.
4579 		 */
4580 		if (reader->real_end)
4581 			local_set(&bpage->commit, reader->real_end);
4582 	}
4583 	ret = read;
4584 
4585 	cpu_buffer->lost_events = 0;
4586 
4587 	commit = local_read(&bpage->commit);
4588 	/*
4589 	 * Set a flag in the commit field if we lost events
4590 	 */
4591 	if (missed_events) {
4592 		/*
4593 		 * If there is room at the end of the page to save the
		 * missed events, then record it there.
4594 		 */
4595 		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4596 			memcpy(&bpage->data[commit], &missed_events,
4597 			       sizeof(missed_events));
4598 			local_add(RB_MISSED_STORED, &bpage->commit);
4599 			commit += sizeof(missed_events);
4600 		}
4601 		local_add(RB_MISSED_EVENTS, &bpage->commit);
4602 	}
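	/*
	 * Consumer-side sketch (illustrative only) of how a reader of
	 * the returned page can undo this encoding:
	 *
	 *	commit = local_read(&bpage->commit);
	 *	real_end = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
	 *	if (commit & RB_MISSED_STORED)
	 *		missed = *(unsigned long *)&bpage->data[real_end];
	 */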
4603 
4604 	/*
4605 	 * This page may be off to user land. Zero it out here.
4606 	 */
4607 	if (commit < BUF_PAGE_SIZE)
4608 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4609 
4610  out_unlock:
4611 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4612 
4613  out:
4614 	return ret;
4615 }
4616 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
4617 
4618 /*
4619  * We only allocate new buffers, never free them if the CPU goes down.
4620  * If we were to free the buffer, then the user would lose any trace that was in
4621  * the buffer.
4622  */
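/*
 * Context, as a sketch (the registration itself lives in the tracing
 * core, not in this file): this callback is wired up once with
 * something like
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *
 * and each allocated ring buffer then registers its ->node via
 * cpuhp_state_add_instance() so that newly onlined CPUs get a buffer.
 */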
4623 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
4624 {
4625 	struct ring_buffer *buffer;
4626 	long nr_pages_same;
4627 	int cpu_i;
4628 	unsigned long nr_pages;
4629 
4630 	buffer = container_of(node, struct ring_buffer, node);
4631 	if (cpumask_test_cpu(cpu, buffer->cpumask))
4632 		return 0;
4633 
4634 	nr_pages = 0;
4635 	nr_pages_same = 1;
4636 	/* check if all cpu sizes are same */
4637 	for_each_buffer_cpu(buffer, cpu_i) {
4638 		/* fill in the size from first enabled cpu */
4639 		if (nr_pages == 0)
4640 			nr_pages = buffer->buffers[cpu_i]->nr_pages;
4641 		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4642 			nr_pages_same = 0;
4643 			break;
4644 		}
4645 	}
4646 	/* allocate minimum pages, user can later expand it */
4647 	if (!nr_pages_same)
4648 		nr_pages = 2;
4649 	buffer->buffers[cpu] =
4650 		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4651 	if (!buffer->buffers[cpu]) {
4652 		WARN(1, "failed to allocate ring buffer on CPU %u\n",
4653 		     cpu);
4654 		return -ENOMEM;
4655 	}
4656 	smp_wmb();
4657 	cpumask_set_cpu(cpu, buffer->cpumask);
4658 	return 0;
4659 }
4660 
4661 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4662 /*
4663  * This is a basic integrity check of the ring buffer.
4664  * Late in the boot cycle this test will run when configured in.
4665  * It will kick off a thread per CPU that will go into a loop
4666  * writing to the per cpu ring buffer various sizes of data.
4667  * Some of the data will be large items, some small.
4668  *
4669  * Another thread is created that goes into a spin, sending out
4670  * IPIs to the other CPUs to also write into the ring buffer.
4671  * This is to test the nesting ability of the buffer.
4672  *
4673  * Basic stats are recorded and reported. If something in the
4674  * ring buffer should happen that's not expected, a big warning
4675  * is displayed and all ring buffers are disabled.
4676  */
4677 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4678 
4679 struct rb_test_data {
4680 	struct ring_buffer	*buffer;
4681 	unsigned long		events;
4682 	unsigned long		bytes_written;
4683 	unsigned long		bytes_alloc;
4684 	unsigned long		bytes_dropped;
4685 	unsigned long		events_nested;
4686 	unsigned long		bytes_written_nested;
4687 	unsigned long		bytes_alloc_nested;
4688 	unsigned long		bytes_dropped_nested;
4689 	int			min_size_nested;
4690 	int			max_size_nested;
4691 	int			max_size;
4692 	int			min_size;
4693 	int			cpu;
4694 	int			cnt;
4695 };
4696 
4697 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4698 
4699 /* 1 meg per cpu */
4700 #define RB_TEST_BUFFER_SIZE	1048576
4701 
4702 static char rb_string[] __initdata =
4703 	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4704 	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4705 	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4706 
4707 static bool rb_test_started __initdata;
4708 
4709 struct rb_item {
4710 	int size;
4711 	char str[];
4712 };
4713 
4714 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4715 {
4716 	struct ring_buffer_event *event;
4717 	struct rb_item *item;
4718 	bool started;
4719 	int event_len;
4720 	int size;
4721 	int len;
4722 	int cnt;
4723 
4724 	/* Have nested writes different than what is written */
4725 	cnt = data->cnt + (nested ? 27 : 0);
4726 
4727 	/* Multiply cnt by ~e, to make some unique increment */
4728 	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4729 
4730 	len = size + sizeof(struct rb_item);
4731 
4732 	started = rb_test_started;
4733 	/* read rb_test_started before checking buffer enabled */
4734 	smp_rmb();
4735 
4736 	event = ring_buffer_lock_reserve(data->buffer, len);
4737 	if (!event) {
4738 		/* Ignore dropped events before test starts. */
4739 		if (started) {
4740 			if (nested)
4741 				data->bytes_dropped_nested += len;
4742 			else
4743 				data->bytes_dropped += len;
4744 		}
4745 		return len;
4746 	}
4747 
4748 	event_len = ring_buffer_event_length(event);
4749 
4750 	if (RB_WARN_ON(data->buffer, event_len < len))
4751 		goto out;
4752 
4753 	item = ring_buffer_event_data(event);
4754 	item->size = size;
4755 	memcpy(item->str, rb_string, size);
4756 
4757 	if (nested) {
4758 		data->bytes_alloc_nested += event_len;
4759 		data->bytes_written_nested += len;
4760 		data->events_nested++;
4761 		if (!data->min_size_nested || len < data->min_size_nested)
4762 			data->min_size_nested = len;
4763 		if (len > data->max_size_nested)
4764 			data->max_size_nested = len;
4765 	} else {
4766 		data->bytes_alloc += event_len;
4767 		data->bytes_written += len;
4768 		data->events++;
4769 		if (!data->min_size || len < data->min_size)
4770 			data->min_size = len;
4771 		if (len > data->max_size)
4772 			data->max_size = len;
4773 	}
4774 
4775  out:
4776 	ring_buffer_unlock_commit(data->buffer, event);
4777 
4778 	return 0;
4779 }
4780 
4781 static __init int rb_test(void *arg)
4782 {
4783 	struct rb_test_data *data = arg;
4784 
4785 	while (!kthread_should_stop()) {
4786 		rb_write_something(data, false);
4787 		data->cnt++;
4788 
4789 		set_current_state(TASK_INTERRUPTIBLE);
4790 		/* Now sleep between a min of 100-300us and a max of 1ms */
4791 		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4792 	}
4793 
4794 	return 0;
4795 }
4796 
4797 static __init void rb_ipi(void *ignore)
4798 {
4799 	struct rb_test_data *data;
4800 	int cpu = smp_processor_id();
4801 
4802 	data = &rb_data[cpu];
4803 	rb_write_something(data, true);
4804 }
4805 
4806 static __init int rb_hammer_test(void *arg)
4807 {
4808 	while (!kthread_should_stop()) {
4809 
4810 		/* Send an IPI to all cpus to write data! */
4811 		smp_call_function(rb_ipi, NULL, 1);
4812 		/* No sleep, but on non-preempt kernels, let others run */
4813 		schedule();
4814 	}
4815 
4816 	return 0;
4817 }
4818 
4819 static __init int test_ringbuffer(void)
4820 {
4821 	struct task_struct *rb_hammer;
4822 	struct ring_buffer *buffer;
4823 	int cpu;
4824 	int ret = 0;
4825 
4826 	pr_info("Running ring buffer tests...\n");
4827 
4828 	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4829 	if (WARN_ON(!buffer))
4830 		return 0;
4831 
4832 	/* Disable buffer so that threads can't write to it yet */
4833 	ring_buffer_record_off(buffer);
4834 
4835 	for_each_online_cpu(cpu) {
4836 		rb_data[cpu].buffer = buffer;
4837 		rb_data[cpu].cpu = cpu;
4838 		rb_data[cpu].cnt = cpu;
4839 		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4840 						 "rbtester/%d", cpu);
4841 		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
4842 			pr_cont("FAILED\n");
4843 			ret = PTR_ERR(rb_threads[cpu]);
4844 			goto out_free;
4845 		}
4846 
4847 		kthread_bind(rb_threads[cpu], cpu);
4848 		wake_up_process(rb_threads[cpu]);
4849 	}
4850 
4851 	/* Now create the rb hammer! */
4852 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4853 	if (WARN_ON(IS_ERR(rb_hammer))) {
4854 		pr_cont("FAILED\n");
4855 		ret = PTR_ERR(rb_hammer);
4856 		goto out_free;
4857 	}
4858 
4859 	ring_buffer_record_on(buffer);
4860 	/*
4861 	 * Show buffer is enabled before setting rb_test_started.
4862 	 * Yes there's a small race window where events could be
4863  * dropped and the thread won't catch it. But when a ring
4864 	 * buffer gets enabled, there will always be some kind of
4865 	 * delay before other CPUs see it. Thus, we don't care about
4866 	 * those dropped events. We care about events dropped after
4867 	 * the threads see that the buffer is active.
4868 	 */
4869 	smp_wmb();
4870 	rb_test_started = true;
4871 
4872 	set_current_state(TASK_INTERRUPTIBLE);
4873 	/* Just run for 10 seconds */
4874 	schedule_timeout(10 * HZ);
4875 
4876 	kthread_stop(rb_hammer);
4877 
4878  out_free:
4879 	for_each_online_cpu(cpu) {
4880 		if (!rb_threads[cpu])
4881 			break;
4882 		kthread_stop(rb_threads[cpu]);
4883 	}
4884 	if (ret) {
4885 		ring_buffer_free(buffer);
4886 		return ret;
4887 	}
4888 
4889 	/* Report! */
4890 	pr_info("finished\n");
4891 	for_each_online_cpu(cpu) {
4892 		struct ring_buffer_event *event;
4893 		struct rb_test_data *data = &rb_data[cpu];
4894 		struct rb_item *item;
4895 		unsigned long total_events;
4896 		unsigned long total_dropped;
4897 		unsigned long total_written;
4898 		unsigned long total_alloc;
4899 		unsigned long total_read = 0;
4900 		unsigned long total_size = 0;
4901 		unsigned long total_len = 0;
4902 		unsigned long total_lost = 0;
4903 		unsigned long lost;
4904 		int big_event_size;
4905 		int small_event_size;
4906 
4907 		ret = -1;
4908 
4909 		total_events = data->events + data->events_nested;
4910 		total_written = data->bytes_written + data->bytes_written_nested;
4911 		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4912 		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4913 
4914 		big_event_size = data->max_size + data->max_size_nested;
4915 		small_event_size = data->min_size + data->min_size_nested;
4916 
4917 		pr_info("CPU %d:\n", cpu);
4918 		pr_info("              events:    %ld\n", total_events);
4919 		pr_info("       dropped bytes:    %ld\n", total_dropped);
4920 		pr_info("       alloced bytes:    %ld\n", total_alloc);
4921 		pr_info("       written bytes:    %ld\n", total_written);
4922 		pr_info("       biggest event:    %d\n", big_event_size);
4923 		pr_info("      smallest event:    %d\n", small_event_size);
4924 
4925 		if (RB_WARN_ON(buffer, total_dropped))
4926 			break;
4927 
4928 		ret = 0;
4929 
4930 		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4931 			total_lost += lost;
4932 			item = ring_buffer_event_data(event);
4933 			total_len += ring_buffer_event_length(event);
4934 			total_size += item->size + sizeof(struct rb_item);
4935 			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4936 				pr_info("FAILED!\n");
4937 				pr_info("buffer had: %.*s\n", item->size, item->str);
4938 				pr_info("expected:   %.*s\n", item->size, rb_string);
4939 				RB_WARN_ON(buffer, 1);
4940 				ret = -1;
4941 				break;
4942 			}
4943 			total_read++;
4944 		}
4945 		if (ret)
4946 			break;
4947 
4948 		ret = -1;
4949 
4950 		pr_info("         read events:   %ld\n", total_read);
4951 		pr_info("         lost events:   %ld\n", total_lost);
4952 		pr_info("        total events:   %ld\n", total_lost + total_read);
4953 		pr_info("  recorded len bytes:   %ld\n", total_len);
4954 		pr_info(" recorded size bytes:   %ld\n", total_size);
4955 		if (total_lost)
4956 			pr_info(" With dropped events, record len and size may not match\n"
4957 				" alloced and written from above\n");
4958 		if (!total_lost) {
4959 			if (RB_WARN_ON(buffer, total_len != total_alloc ||
4960 				       total_size != total_written))
4961 				break;
4962 		}
4963 		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4964 			break;
4965 
4966 		ret = 0;
4967 	}
4968 	if (!ret)
4969 		pr_info("Ring buffer PASSED!\n");
4970 
4971 	ring_buffer_free(buffer);
4972 	return 0;
4973 }
4974 
4975 late_initcall(test_ringbuffer);
4976 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
4977