Lines Matching +full:cpu +full:- +full:read

1 // SPDX-License-Identifier: GPL-2.0
28 #include <linux/cpu.h>
90 * allocated for each CPU. A writer may only write to a buffer that is
91 * associated with the CPU it is currently executing on. A reader may read
92 * from any per cpu buffer.
94 * The reader is special. For each per cpu buffer, the reader has its own
95 * reader page. When a reader has read the entire reader page, this reader
104-145 * [ASCII art omitted here: four diagrams showing the reader page being swapped into the ring, with the old head page becoming the new reader page]
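
As an aid to the comment above, here is a caller-side sketch (not part of this file) of the model it describes: a writer commits data only on the CPU it runs on, while a reader may consume from any per-CPU buffer. The demo_* names and the payload layout are illustrative assumptions; ring_buffer_write(), ring_buffer_consume() and ring_buffer_event_data() are the exported API.

#include <linux/ring_buffer.h>
#include <linux/printk.h>

struct demo_event {                             /* hypothetical payload */
        u64     seq;
};

/* Writer side: only ever touches the buffer of the CPU it executes on. */
static int demo_write(struct trace_buffer *buffer, u64 seq)
{
        struct demo_event ev = { .seq = seq };

        /* Copies the payload into the current CPU's sub-buffer and commits it. */
        return ring_buffer_write(buffer, sizeof(ev), &ev);
}

/* Reader side: may consume events from any per-CPU buffer. */
static void demo_drain_cpu(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL))) {
                struct demo_event *ev = ring_buffer_event_data(event);

                pr_info("cpu%d: seq=%llu ts=%llu\n", cpu, ev->seq, ts);
        }
}
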
188 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
192 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
198 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
199 event->time_delta = 0; in rb_event_set_padding()
207 if (event->type_len) in rb_event_data_length()
208 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
210 length = event->array[0]; in rb_event_data_length()
222 switch (event->type_len) { in rb_event_length()
226 return -1; in rb_event_length()
227 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
262 * ring_buffer_event_length - return the length of the event
279 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
281 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
282 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
283 length -= sizeof(event->array[0]); in ring_buffer_event_length()
294 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
296 if (event->type_len) in rb_event_data()
297 return (void *)&event->array[0]; in rb_event_data()
299 return (void *)&event->array[1]; in rb_event_data()
303 * ring_buffer_event_data - return the data of the event
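
For callers that reserve space first and fill it in place, ring_buffer_event_data() returns the payload area past the event header. A minimal writer-side sketch, assuming the single-argument ring_buffer_unlock_commit() of recent kernels (as in this file); demo_reserve_write() is a hypothetical helper:

static int demo_reserve_write(struct trace_buffer *buffer, u64 value)
{
        struct ring_buffer_event *event;
        u64 *payload;

        /* Reserve space on the current CPU's buffer; may fail (recursion, disabled). */
        event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
        if (!event)
                return -EBUSY;

        payload = ring_buffer_event_data(event);        /* skips the event header */
        *payload = value;

        /* Publish the event; pairs with ring_buffer_lock_reserve(). */
        return ring_buffer_unlock_commit(buffer);
}
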
312 #define for_each_buffer_cpu(buffer, cpu) \ argument
313 for_each_cpu(cpu, buffer->cpumask)
315 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
316 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
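
Inside this file the two iterators are used like any other cpumask loop. A hypothetical helper in the same style (not in the source) that sums the readable entries of every allocated per-CPU buffer:

/* Entries recorded minus those overwritten or already read, over all CPUs. */
static unsigned long rb_total_entries(struct trace_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long entries = 0;
        int cpu;

        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                entries += local_read(&cpu_buffer->entries) -
                           (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
        }
        return entries;
}
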
319 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
326 ts = event->array[0]; in rb_event_time_stamp()
328 ts += event->time_delta; in rb_event_time_stamp()
362 unsigned read; /* index for next read */ member
388 local_set(&bpage->commit, 0); in rb_init_page()
393 return local_read(&bpage->page->commit); in rb_page_commit()
399 if (!bpage->range) in free_buffer_page()
400 free_pages((unsigned long)bpage->page, bpage->order); in free_buffer_page()
438 * EXTEND - wants a time extend
439 * ABSOLUTE - the buffer requests all events to have absolute time stamps
440 * FORCE - force a full time stamp.
478 int cpu; member
491 struct buffer_page *head_page; /* read from head */
510 unsigned long read; member
603 (unsigned int)buffer->subbuf_size, in ring_buffer_print_page_header()
611 *ret = local64_read(&t->time); in rb_time_read()
615 local64_set(&t->time, val); in rb_time_set()
629 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
630 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
641 commit = local_read(&page->page->commit); in verify_event()
642 write = local_read(&page->write); in verify_event()
643 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
644 addr < (unsigned long)&page->page->data[write]) in verify_event()
647 next = rb_list_head(page->list.next); in verify_event()
681 * ring_buffer_event_time_stamp - return the event's current time stamp
700 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
705 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { in ring_buffer_event_time_stamp()
707 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
710 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
715 /* Read the current saved nesting level time stamp */ in ring_buffer_event_time_stamp()
716 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
717 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
723 rb_time_read(&cpu_buffer->write_stamp, &ts); in ring_buffer_event_time_stamp()
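
A sketch of the intended call site: between ring_buffer_lock_reserve() and ring_buffer_unlock_commit(), on the same CPU, the stamp the buffer assigned to the event can be read back. The helper name and the idea of storing the stamp in the payload are illustrative assumptions:

/* Record, between reserve and commit, the time stamp the buffer assigned. */
static int demo_record_stamp(struct trace_buffer *buffer)
{
        struct ring_buffer_event *event;
        u64 *payload;

        event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
        if (!event)
                return -EBUSY;

        /* Only valid while the event is still on the buffer (reserve..commit here). */
        payload = ring_buffer_event_data(event);
        *payload = ring_buffer_event_time_stamp(buffer, event);

        return ring_buffer_unlock_commit(buffer);
}
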
729 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
731 * @cpu: The cpu of the ring_buffer to get the number of pages from
735 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
737 size_t read; in ring_buffer_nr_dirty_pages() local
741 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
742 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
743 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
748 cnt -= lost; in ring_buffer_nr_dirty_pages()
750 /* The reader can read an empty page, but not more than that */ in ring_buffer_nr_dirty_pages()
751 if (cnt < read) { in ring_buffer_nr_dirty_pages()
752 WARN_ON_ONCE(read > cnt + 1); in ring_buffer_nr_dirty_pages()
756 return cnt - read; in ring_buffer_nr_dirty_pages()
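
Together with ring_buffer_nr_pages(), this is enough for a caller to compute a rough fill level, much like full_hit() below does internally. A hedged sketch; the helper name is an assumption:

/* Rough "percent full" of one CPU's buffer (illustrative helper). */
static int demo_buffer_percent(struct trace_buffer *buffer, int cpu)
{
        size_t nr_pages = ring_buffer_nr_pages(buffer, cpu);
        size_t dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

        return nr_pages ? (int)(dirty * 100 / nr_pages) : 0;
}
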
759 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
765 nr_pages = cpu_buffer->nr_pages; in full_hit()
770 * Add one as dirty will never equal nr_pages, as the sub-buffer in full_hit()
774 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
780 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
790 (void)atomic_fetch_inc_release(&rbwork->seq); in rb_wake_up_waiters()
792 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
793 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { in rb_wake_up_waiters()
799 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
800 rbwork->wakeup_full = false; in rb_wake_up_waiters()
801 rbwork->full_waiters_pending = false; in rb_wake_up_waiters()
804 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
805 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
807 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
812 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
814 * @cpu: The CPU buffer to wake waiters on
819 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
827 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wake_waiters()
830 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
831 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
833 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
835 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
837 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) in ring_buffer_wake_waiters()
840 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
841 /* The CPU buffer may not have been initialized yet */ in ring_buffer_wake_waiters()
844 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
848 irq_work_queue(&rbwork->work); in ring_buffer_wake_waiters()
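
Typical callers use this on shutdown paths so that no reader stays blocked in ring_buffer_wait(). A minimal sketch; the stop flag and helper name are assumptions:

static bool demo_stopping;              /* hypothetical shutdown flag checked by readers */

/* Shutdown path: wake every waiter so blocked readers can notice demo_stopping. */
static void demo_stop_readers(struct trace_buffer *buffer)
{
        WRITE_ONCE(demo_stopping, true);
        /* RING_BUFFER_ALL_CPUS wakes each per-CPU waiter and the "any CPU" waiters. */
        ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
}
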
851 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) in rb_watermark_hit() argument
857 if (cpu == RING_BUFFER_ALL_CPUS) in rb_watermark_hit()
860 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
862 if (!ring_buffer_empty_cpu(buffer, cpu)) { in rb_watermark_hit()
869 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
870 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
871 ret = !pagebusy && full_hit(buffer, cpu, full); in rb_watermark_hit()
873 if (!ret && (!cpu_buffer->shortest_full || in rb_watermark_hit()
874 cpu_buffer->shortest_full > full)) { in rb_watermark_hit()
875 cpu_buffer->shortest_full = full; in rb_watermark_hit()
877 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
884 int cpu, int full, ring_buffer_cond_fn cond, void *data) in rb_wait_cond() argument
886 if (rb_watermark_hit(buffer, cpu, full)) in rb_wait_cond()
913 rbwork->full_waiters_pending = true; in rb_wait_cond()
915 rbwork->waiters_pending = true; in rb_wait_cond()
932 struct rb_irq_work *rbwork = rdata->irq_work; in rb_wait_once()
934 return atomic_read_acquire(&rbwork->seq) != rdata->seq; in rb_wait_once()
938 * ring_buffer_wait - wait for input to the ring buffer
940 * @cpu: the cpu buffer to wait on
941 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
945 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
946 * as data is added to any of the @buffer's cpu buffers. Otherwise
947 * it will wait for data to be added to a specific cpu buffer.
949 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, in ring_buffer_wait() argument
960 * data in any cpu buffer, or a specific buffer, put the in ring_buffer_wait()
963 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wait()
964 rbwork = &buffer->irq_work; in ring_buffer_wait()
965 /* Full only makes sense on per cpu reads */ in ring_buffer_wait()
968 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
969 return -ENODEV; in ring_buffer_wait()
970 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
971 rbwork = &cpu_buffer->irq_work; in ring_buffer_wait()
975 waitq = &rbwork->full_waiters; in ring_buffer_wait()
977 waitq = &rbwork->waiters; in ring_buffer_wait()
983 rdata.seq = atomic_read_acquire(&rbwork->seq); in ring_buffer_wait()
988 rb_wait_cond(rbwork, buffer, cpu, full, cond, data)); in ring_buffer_wait()
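
A caller-side sketch of a blocking reader built on this, assuming the five-argument signature shown above (NULL @cond/@data gives wait-once semantics); the demo_* helper is an assumption:

/* Block until @cpu's buffer is at least half full, then drain it (illustrative). */
static int demo_wait_and_drain(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;
        int ret;

        /* NULL cond/data: return on the next wakeup (watermark hit or signal). */
        ret = ring_buffer_wait(buffer, cpu, 50, NULL, NULL);
        if (ret)
                return ret;             /* e.g. -EINTR or -ENODEV */

        while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
                pr_debug("event of %u bytes at %llu\n",
                         ring_buffer_event_length(event), ts);
        return 0;
}
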
994 * ring_buffer_poll_wait - poll on buffer input
996 * @cpu: the cpu buffer to wait on
999 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
1001 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1002 * as data is added to any of the @buffer's cpu buffers. Otherwise
1003 * it will wait for data to be added to a specific cpu buffer.
1008 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1014 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_poll_wait()
1015 rbwork = &buffer->irq_work; in ring_buffer_poll_wait()
1018 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1022 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1026 poll_wait(filp, &rbwork->full_waiters, poll_table); in ring_buffer_poll_wait()
1028 if (rb_watermark_hit(buffer, cpu, full)) in ring_buffer_poll_wait()
1043 rbwork->full_waiters_pending = true; in ring_buffer_poll_wait()
1047 poll_wait(filp, &rbwork->waiters, poll_table); in ring_buffer_poll_wait()
1048 rbwork->waiters_pending = true; in ring_buffer_poll_wait()
1065 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1066 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
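
The usual consumer is a file's .poll method, as in the tracing code. A hedged sketch; struct demo_reader and the file wiring are assumptions:

#include <linux/poll.h>
#include <linux/ring_buffer.h>

struct demo_reader {                    /* hypothetical per-open-file state */
        struct trace_buffer     *buffer;
        int                     cpu;
};

static __poll_t demo_poll(struct file *filp, poll_table *wait)
{
        struct demo_reader *r = filp->private_data;

        /* full == 0: report readable as soon as any data is present on r->cpu. */
        return ring_buffer_poll_wait(r->buffer, r->cpu, filp, wait, 0);
}
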
1079 atomic_inc(&__b->buffer->record_disabled); \
1081 atomic_inc(&b->record_disabled); \
1094 /* Skip retpolines :-( */ in rb_time_stamp()
1095 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1098 ts = buffer->clock(); in rb_time_stamp()
1117 int cpu, u64 *ts) in ring_buffer_normalize_time_stamp() argument
1126 * Although writes only happen on the CPU that they are on,
1128 * happen on any CPU.
1156 * head->list->prev->next bit 1 bit 0
1157 * ------- -------
1164 * +----+ +-----+ +-----+
1165 * | |------>| T |---X--->| N |
1166 * | |<------| | | |
1167 * +----+ +-----+ +-----+
1169 * | +-----+ | |
1170 * +----------| R |----------+ |
1171 * | |<-----------+
1172 * +-----+
1174 * Key: ---X--> HEAD flag set in pointer
1204 * rb_list_head - remove any bit
1214 * rb_is_head_page - test if the given page is the head page
1226 val = (unsigned long)list->next; in rb_is_head_page()
1228 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1243 struct list_head *list = page->list.prev; in rb_is_reader_page()
1245 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
1249 * rb_set_list_to_head - set a list_head to be pointing to head.
1255 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1261 * rb_head_page_activate - sets up head page
1267 head = cpu_buffer->head_page; in rb_head_page_activate()
1274 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1276 if (cpu_buffer->ring_meta) { in rb_head_page_activate()
1277 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_head_page_activate()
1278 meta->head_buffer = (unsigned long)head->page; in rb_head_page_activate()
1284 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1290 * rb_head_page_deactivate - clears head page ptr (for free list)
1298 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1300 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1310 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1313 list = &prev->list; in rb_head_page_set()
1317 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1356 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1363 struct list_head *p = rb_list_head((*bpage)->list.prev); in rb_dec_page()
1376 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1380 list = cpu_buffer->pages; in rb_set_head_page()
1381 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1384 page = head = cpu_buffer->head_page; in rb_set_head_page()
1393 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1394 cpu_buffer->head_page = page; in rb_set_head_page()
1409 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1415 return try_cmpxchg(ptr, &val, (unsigned long)&new->list); in rb_head_page_replace()
1419 * rb_tail_page_update - move the tail page forward
1437 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1438 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1451 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1466 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1467 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1474 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1477 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) in rb_tail_page_update()
1478 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1494 rb_list_head(rb_list_head(list->next)->prev) != list)) in rb_check_links()
1498 rb_list_head(rb_list_head(list->prev)->next) != list)) in rb_check_links()
1505 * rb_check_pages - integrity check of buffer pages
1506 * @cpu_buffer: CPU buffer with pages to test
1525 * time when interrupts are disabled non-deterministic, dependent on the in rb_check_pages()
1526 * ring buffer size. Therefore, the code releases and re-acquires the in rb_check_pages()
1532 * giving up. This is acceptable because this is only a self-validation in rb_check_pages()
1541 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1542 head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1545 buffer_cnt = cpu_buffer->cnt; in rb_check_pages()
1547 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1550 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1552 if (buffer_cnt != cpu_buffer->cnt) { in rb_check_pages()
1554 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1558 tmp = rb_list_head(tmp->next); in rb_check_pages()
1566 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1570 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1577 * This is used to help find the next per cpu subbuffer within a mapped range.
1588 * Return the ring_buffer_meta for a given @cpu.
1590 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu) in rb_range_meta() argument
1592 int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_range_meta()
1598 bmeta = buffer->meta; in rb_range_meta()
1602 ptr = (unsigned long)bmeta + bmeta->buffers_offset; in rb_range_meta()
1607 nr_subbufs = meta->nr_subbufs; in rb_range_meta()
1617 if (cpu) { in rb_range_meta()
1622 if (cpu > 1) { in rb_range_meta()
1626 /* Save the beginning of this CPU chunk */ in rb_range_meta()
1632 size = ptr - p; in rb_range_meta()
1633 ptr += size * (cpu - 2); in rb_range_meta()
1642 int subbuf_size = meta->subbuf_size; in rb_subbufs_from_meta()
1646 ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs); in rb_subbufs_from_meta()
1652 * Return a specific sub-buffer for a given @cpu defined by @idx.
1660 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1664 if (WARN_ON_ONCE(idx >= meta->nr_subbufs)) in rb_range_buffer()
1667 subbuf_size = meta->subbuf_size; in rb_range_buffer()
1669 /* Map this buffer to the order that's in meta->buffers[] */ in rb_range_buffer()
1670 idx = meta->buffers[idx]; in rb_range_buffer()
1675 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) in rb_range_buffer()
1687 unsigned long ptr = buffer->range_addr_start; in rb_meta_init()
1693 buffer->meta = bmeta; in rb_meta_init()
1695 total_size = buffer->range_addr_end - buffer->range_addr_start; in rb_meta_init()
1705 if (bmeta->magic != RING_BUFFER_META_MAGIC) { in rb_meta_init()
1710 if (bmeta->struct_sizes != struct_sizes) { in rb_meta_init()
1715 if (bmeta->total_size != total_size) { in rb_meta_init()
1720 if (bmeta->buffers_offset > bmeta->total_size) { in rb_meta_init()
1725 if (bmeta->buffers_offset != (void *)ptr - (void *)bmeta) { in rb_meta_init()
1733 bmeta->magic = RING_BUFFER_META_MAGIC; in rb_meta_init()
1734 bmeta->struct_sizes = struct_sizes; in rb_meta_init()
1735 bmeta->total_size = total_size; in rb_meta_init()
1736 bmeta->buffers_offset = (void *)ptr - (void *)bmeta; in rb_meta_init()
1739 memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta)); in rb_meta_init()
1750 static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu, in rb_cpu_meta_valid() argument
1763 buffers_start = meta->first_buffer; in rb_cpu_meta_valid()
1764 buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs); in rb_cpu_meta_valid()
1767 if (meta->head_buffer < buffers_start || in rb_cpu_meta_valid()
1768 meta->head_buffer >= buffers_end) { in rb_cpu_meta_valid()
1769 pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu); in rb_cpu_meta_valid()
1773 if (meta->commit_buffer < buffers_start || in rb_cpu_meta_valid()
1774 meta->commit_buffer >= buffers_end) { in rb_cpu_meta_valid()
1775 pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu); in rb_cpu_meta_valid()
1781 bitmap_clear(subbuf_mask, 0, meta->nr_subbufs); in rb_cpu_meta_valid()
1784 for (i = 0; i < meta->nr_subbufs; i++) { in rb_cpu_meta_valid()
1785 if (meta->buffers[i] < 0 || in rb_cpu_meta_valid()
1786 meta->buffers[i] >= meta->nr_subbufs) { in rb_cpu_meta_valid()
1787 pr_info("Ring buffer boot meta [%d] array out of range\n", cpu); in rb_cpu_meta_valid()
1791 if ((unsigned)local_read(&subbuf->commit) > subbuf_size) { in rb_cpu_meta_valid()
1792 pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu); in rb_cpu_meta_valid()
1796 if (test_bit(meta->buffers[i], subbuf_mask)) { in rb_cpu_meta_valid()
1797 pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu); in rb_cpu_meta_valid()
1801 set_bit(meta->buffers[i], subbuf_mask); in rb_cpu_meta_valid()
1810 static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu, in rb_read_data_buffer() argument
1821 ts = dpage->time_stamp; in rb_read_data_buffer()
1825 event = (struct ring_buffer_event *)(dpage->data + e); in rb_read_data_buffer()
1827 switch (event->type_len) { in rb_read_data_buffer()
1840 return -1; in rb_read_data_buffer()
1846 if (event->time_delta == 1) in rb_read_data_buffer()
1851 ts += event->time_delta; in rb_read_data_buffer()
1855 return -1; in rb_read_data_buffer()
1862 static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu) in rb_validate_buffer() argument
1868 tail = local_read(&dpage->commit); in rb_validate_buffer()
1869 return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta); in rb_validate_buffer()
1875 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_meta_validate_events()
1883 if (!meta || !meta->head_buffer) in rb_meta_validate_events()
1887 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1893 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); in rb_meta_validate_events()
1894 local_set(&cpu_buffer->reader_page->entries, ret); in rb_meta_validate_events()
1896 orig_head = head_page = cpu_buffer->head_page; in rb_meta_validate_events()
1897 ts = head_page->page->time_stamp; in rb_meta_validate_events()
1900 * Try to rewind the head so that we can read the pages which already in rb_meta_validate_events()
1901 * read in the previous boot. in rb_meta_validate_events()
1903 if (head_page == cpu_buffer->tail_page) in rb_meta_validate_events()
1907 for (i = 0; i < meta->nr_subbufs + 1; i++, rb_dec_page(&head_page)) { in rb_meta_validate_events()
1910 if (head_page == cpu_buffer->tail_page) in rb_meta_validate_events()
1914 if (ts < head_page->page->time_stamp) in rb_meta_validate_events()
1917 ts = head_page->page->time_stamp; in rb_meta_validate_events()
1923 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1928 local_set(&head_page->entries, ret); in rb_meta_validate_events()
1930 local_inc(&cpu_buffer->pages_touched); in rb_meta_validate_events()
1935 pr_info("Ring buffer [%d] rewound %d pages\n", cpu_buffer->cpu, i); in rb_meta_validate_events()
1954 cpu_buffer->reader_page->list.next = &orig_head->list; in rb_meta_validate_events()
1955 cpu_buffer->reader_page->list.prev = orig_head->list.prev; in rb_meta_validate_events()
1956 orig_head->list.prev = &cpu_buffer->reader_page->list; in rb_meta_validate_events()
1957 bpage->list.next = &cpu_buffer->reader_page->list; in rb_meta_validate_events()
1960 cpu_buffer->reader_page = head_page; in rb_meta_validate_events()
1963 head_page->list.prev = bpage->list.prev; in rb_meta_validate_events()
1965 bpage->list.next = &head_page->list; in rb_meta_validate_events()
1966 rb_set_list_to_head(&bpage->list); in rb_meta_validate_events()
1967 cpu_buffer->pages = &head_page->list; in rb_meta_validate_events()
1969 cpu_buffer->head_page = head_page; in rb_meta_validate_events()
1970 meta->head_buffer = (unsigned long)head_page->page; in rb_meta_validate_events()
1973 bpage = cpu_buffer->reader_page; in rb_meta_validate_events()
1974 meta->buffers[0] = rb_meta_subbuf_idx(meta, bpage->page); in rb_meta_validate_events()
1975 bpage->id = 0; in rb_meta_validate_events()
1977 for (i = 1, bpage = head_page; i < meta->nr_subbufs; in rb_meta_validate_events()
1979 meta->buffers[i] = rb_meta_subbuf_idx(meta, bpage->page); in rb_meta_validate_events()
1980 bpage->id = i; in rb_meta_validate_events()
1989 if (meta->commit_buffer == (unsigned long)cpu_buffer->reader_page->page) { in rb_meta_validate_events()
1990 cpu_buffer->commit_page = cpu_buffer->reader_page; in rb_meta_validate_events()
1996 for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) { in rb_meta_validate_events()
1999 if (head_page == cpu_buffer->reader_page) in rb_meta_validate_events()
2002 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
2005 cpu_buffer->cpu); in rb_meta_validate_events()
2011 local_inc(&cpu_buffer->pages_touched); in rb_meta_validate_events()
2014 entry_bytes += local_read(&head_page->page->commit); in rb_meta_validate_events()
2015 local_set(&cpu_buffer->head_page->entries, ret); in rb_meta_validate_events()
2017 if (head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
2021 if (head_page != cpu_buffer->commit_page) { in rb_meta_validate_events()
2023 cpu_buffer->cpu); in rb_meta_validate_events()
2027 local_set(&cpu_buffer->entries, entries); in rb_meta_validate_events()
2028 local_set(&cpu_buffer->entries_bytes, entry_bytes); in rb_meta_validate_events()
2030 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
2035 meta->head_buffer = 0; in rb_meta_validate_events()
2036 meta->commit_buffer = 0; in rb_meta_validate_events()
2039 local_set(&cpu_buffer->reader_page->entries, 0); in rb_meta_validate_events()
2040 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_meta_validate_events()
2043 for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) { in rb_meta_validate_events()
2044 local_set(&head_page->entries, 0); in rb_meta_validate_events()
2045 local_set(&head_page->page->commit, 0); in rb_meta_validate_events()
2056 int cpu; in rb_range_meta_init() local
2066 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in rb_range_meta_init()
2069 meta = rb_range_meta(buffer, nr_pages, cpu); in rb_range_meta_init()
2071 if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) { in rb_range_meta_init()
2074 delta = (unsigned long)subbuf - meta->first_buffer; in rb_range_meta_init()
2075 meta->first_buffer += delta; in rb_range_meta_init()
2076 meta->head_buffer += delta; in rb_range_meta_init()
2077 meta->commit_buffer += delta; in rb_range_meta_init()
2081 if (cpu < nr_cpu_ids - 1) in rb_range_meta_init()
2082 next_meta = rb_range_meta(buffer, nr_pages, cpu + 1); in rb_range_meta_init()
2084 next_meta = (void *)buffer->range_addr_end; in rb_range_meta_init()
2086 memset(meta, 0, next_meta - (void *)meta); in rb_range_meta_init()
2088 meta->nr_subbufs = nr_pages + 1; in rb_range_meta_init()
2089 meta->subbuf_size = PAGE_SIZE; in rb_range_meta_init()
2093 meta->first_buffer = (unsigned long)subbuf; in rb_range_meta_init()
2096 * The buffers[] array holds the order of the sub-buffers in rb_range_meta_init()
2097 * that are after the meta data. The sub-buffers may in rb_range_meta_init()
2098 * be swapped out when read and inserted into a different in rb_range_meta_init()
2101 * index into the sub-buffers holding their actual order. in rb_range_meta_init()
2103 for (i = 0; i < meta->nr_subbufs; i++) { in rb_range_meta_init()
2104 meta->buffers[i] = i; in rb_range_meta_init()
2106 subbuf += meta->subbuf_size; in rb_range_meta_init()
2114 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start()
2115 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rbm_start()
2121 if (*pos > meta->nr_subbufs) in rbm_start()
2139 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show()
2140 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rbm_show()
2145 rb_meta_subbuf_idx(meta, (void *)meta->head_buffer)); in rbm_show()
2147 rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer)); in rbm_show()
2148 seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size); in rbm_show()
2149 seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs); in rbm_show()
2153 val -= 2; in rbm_show()
2154 seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]); in rbm_show()
2170 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu) in ring_buffer_meta_seq_init() argument
2179 m = file->private_data; in ring_buffer_meta_seq_init()
2180 m->private = buffer->buffers[cpu]; in ring_buffer_meta_seq_init()
2189 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_meta_buffer_update()
2191 if (meta->head_buffer == (unsigned long)bpage->page) in rb_meta_buffer_update()
2192 cpu_buffer->head_page = bpage; in rb_meta_buffer_update()
2194 if (meta->commit_buffer == (unsigned long)bpage->page) { in rb_meta_buffer_update()
2195 cpu_buffer->commit_page = bpage; in rb_meta_buffer_update()
2196 cpu_buffer->tail_page = bpage; in rb_meta_buffer_update()
2203 struct trace_buffer *buffer = cpu_buffer->buffer; in __rb_allocate_pages()
2206 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
2219 return -ENOMEM; in __rb_allocate_pages()
2223 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
2240 if (buffer->range_addr_start) in __rb_allocate_pages()
2241 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2247 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
2257 list_add_tail(&bpage->list, pages); in __rb_allocate_pages()
2261 bpage->page = rb_range_buffer(cpu_buffer, i + 1); in __rb_allocate_pages()
2262 if (!bpage->page) in __rb_allocate_pages()
2265 if (meta->head_buffer) in __rb_allocate_pages()
2267 bpage->range = 1; in __rb_allocate_pages()
2268 bpage->id = i + 1; in __rb_allocate_pages()
2270 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), in __rb_allocate_pages()
2272 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
2275 bpage->page = page_address(page); in __rb_allocate_pages()
2276 rb_init_page(bpage->page); in __rb_allocate_pages()
2278 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2290 list_del_init(&bpage->list); in __rb_allocate_pages()
2296 return -ENOMEM; in __rb_allocate_pages()
2307 return -ENOMEM; in rb_allocate_pages()
2314 cpu_buffer->pages = pages.next; in rb_allocate_pages()
2317 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
2325 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
2334 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
2338 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
2339 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
2340 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
2341 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
2342 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
2343 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
2344 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
2345 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
2346 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
2347 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
2348 mutex_init(&cpu_buffer->mapping_lock); in rb_allocate_cpu_buffer()
2351 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
2357 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
2359 if (buffer->range_addr_start) { in rb_allocate_cpu_buffer()
2364 cpu_buffer->mapped = 1; in rb_allocate_cpu_buffer()
2365 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2366 bpage->page = rb_range_buffer(cpu_buffer, 0); in rb_allocate_cpu_buffer()
2367 if (!bpage->page) in rb_allocate_cpu_buffer()
2369 if (cpu_buffer->ring_meta->head_buffer) in rb_allocate_cpu_buffer()
2371 bpage->range = 1; in rb_allocate_cpu_buffer()
2373 page = alloc_pages_node(cpu_to_node(cpu), in rb_allocate_cpu_buffer()
2375 cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
2378 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
2379 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
2382 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
2383 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
2392 meta = cpu_buffer->ring_meta; in rb_allocate_cpu_buffer()
2393 if (!meta || !meta->head_buffer || in rb_allocate_cpu_buffer()
2394 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) { in rb_allocate_cpu_buffer()
2395 if (meta && meta->head_buffer && in rb_allocate_cpu_buffer()
2396 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) { in rb_allocate_cpu_buffer()
2398 if (!cpu_buffer->head_page) in rb_allocate_cpu_buffer()
2400 if (!cpu_buffer->commit_page) in rb_allocate_cpu_buffer()
2402 if (!cpu_buffer->tail_page) in rb_allocate_cpu_buffer()
2406 cpu_buffer->head_page in rb_allocate_cpu_buffer()
2407 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
2408 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
2412 if (cpu_buffer->ring_meta) in rb_allocate_cpu_buffer()
2413 meta->commit_buffer = meta->head_buffer; in rb_allocate_cpu_buffer()
2422 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
2429 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
2432 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
2434 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
2440 list_del_init(&bpage->list); in rb_free_cpu_buffer()
2447 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
2462 int cpu; in alloc_buffer() local
2471 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in alloc_buffer()
2474 buffer->subbuf_order = order; in alloc_buffer()
2476 buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE; in alloc_buffer()
2478 /* Max payload is buffer page size - header (8bytes) */ in alloc_buffer()
2479 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2); in alloc_buffer()
2481 buffer->flags = flags; in alloc_buffer()
2482 buffer->clock = trace_clock_local; in alloc_buffer()
2483 buffer->reader_lock_key = key; in alloc_buffer()
2485 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in alloc_buffer()
2486 init_waitqueue_head(&buffer->irq_work.waiters); in alloc_buffer()
2488 buffer->cpus = nr_cpu_ids; in alloc_buffer()
2491 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in alloc_buffer()
2493 if (!buffer->buffers) in alloc_buffer()
2513 /* Calculate the size for the per CPU data */ in alloc_buffer()
2514 size = end - buffers_start; in alloc_buffer()
2518 * The number of sub-buffers (nr_pages) is determined by the in alloc_buffer()
2520 * Then that is divided by the number of per CPU buffers in alloc_buffer()
2524 nr_pages = (size - sizeof(struct ring_buffer_cpu_meta)) / in alloc_buffer()
2541 nr_pages--; in alloc_buffer()
2546 nr_pages--; in alloc_buffer()
2547 buffer->range_addr_start = start; in alloc_buffer()
2548 buffer->range_addr_end = end; in alloc_buffer()
2554 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in alloc_buffer()
2559 cpu = raw_smp_processor_id(); in alloc_buffer()
2560 cpumask_set_cpu(cpu, buffer->cpumask); in alloc_buffer()
2561 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in alloc_buffer()
2562 if (!buffer->buffers[cpu]) in alloc_buffer()
2565 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in alloc_buffer()
2569 mutex_init(&buffer->mutex); in alloc_buffer()
2574 for_each_buffer_cpu(buffer, cpu) { in alloc_buffer()
2575 if (buffer->buffers[cpu]) in alloc_buffer()
2576 rb_free_cpu_buffer(buffer->buffers[cpu]); in alloc_buffer()
2578 kfree(buffer->buffers); in alloc_buffer()
2581 free_cpumask_var(buffer->cpumask); in alloc_buffer()
2587 * __ring_buffer_alloc - allocate a new ring_buffer
2588 * @size: the size in bytes per cpu that is needed.
2600 /* Default buffer page size - one system page */ in __ring_buffer_alloc()
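
A caller-side sketch of the allocate/free lifecycle, using the ring_buffer_alloc() wrapper macro (which supplies the lock class key) and RB_FL_OVERWRITE; the helper name is an assumption:

#include <linux/ring_buffer.h>

/* Allocate a 1 MiB-per-CPU overwriting ring buffer and free it again (sketch). */
static int demo_buffer_lifetime(void)
{
        struct trace_buffer *buffer;

        buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
        if (!buffer)
                return -ENOMEM;

        /* ... produce and consume events ... */

        ring_buffer_free(buffer);
        return 0;
}
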
2607 * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
2608 * @size: the size in bytes per cpu that is needed.
2610 * @order: sub-buffer order
2636 if (!buffer || !buffer->meta) in ring_buffer_meta_scratch()
2639 meta = buffer->meta; in ring_buffer_meta_scratch()
2644 *size = (void *)meta + meta->buffers_offset - ptr; in ring_buffer_meta_scratch()
2650 * ring_buffer_free - free a ring buffer.
2656 int cpu; in ring_buffer_free() local
2658 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
2660 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
2662 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
2663 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
2665 kfree(buffer->buffers); in ring_buffer_free()
2666 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
2675 buffer->clock = clock; in ring_buffer_set_clock()
2680 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
2685 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
2690 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
2695 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
2710 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2711 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
2721 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
2727 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
2728 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
2732 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
2736 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
2739 /* Read iterators need to reset themselves when some pages removed */ in rb_remove_pages()
2740 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
2742 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
2749 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
2752 next_page->prev = tail_page; in rb_remove_pages()
2755 cpu_buffer->pages = next_page; in rb_remove_pages()
2756 cpu_buffer->cnt++; in rb_remove_pages()
2760 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2764 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2765 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2767 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2789 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2790 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2791 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2799 nr_removed--; in rb_remove_pages()
2811 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2817 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2825 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
2827 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
2834 while (retries--) { in rb_insert_pages()
2842 head_page = &hpage->list; in rb_insert_pages()
2843 prev_page = head_page->prev; in rb_insert_pages()
2845 first_page = pages->next; in rb_insert_pages()
2846 last_page = pages->prev; in rb_insert_pages()
2851 last_page->next = head_page_with_bit; in rb_insert_pages()
2852 first_page->prev = prev_page; in rb_insert_pages()
2855 if (try_cmpxchg(&prev_page->next, in rb_insert_pages()
2862 head_page->prev = last_page; in rb_insert_pages()
2863 cpu_buffer->cnt++; in rb_insert_pages()
2876 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2881 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2883 list_del_init(&bpage->list); in rb_insert_pages()
2894 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2898 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2901 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2909 complete(&cpu_buffer->update_done); in update_pages_handler()
2913 * ring_buffer_resize - resize the ring buffer
2916 * @cpu_id: the cpu buffer to resize
2918 * Minimum size is 2 * buffer->subbuf_size.
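
A minimal caller-side sketch; sizes are given in bytes per CPU and the core rounds up to whole sub-buffers, never below the two-sub-buffer minimum noted above. The helper name is an assumption:

/* Ask for @size bytes per CPU across the whole buffer. */
static int demo_resize_all(struct trace_buffer *buffer, unsigned long size)
{
        /* RING_BUFFER_ALL_CPUS resizes every per-CPU buffer; a CPU id resizes one. */
        return ring_buffer_resize(buffer, size, RING_BUFFER_ALL_CPUS);
}
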
2927 int cpu, err; in ring_buffer_resize() local
2930 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2937 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2940 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in ring_buffer_resize()
2948 * with new per CPU buffers being created. in ring_buffer_resize()
2953 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2954 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2962 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2964 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2965 err = -EBUSY; in ring_buffer_resize()
2971 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2972 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2974 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2975 cpu_buffer->nr_pages; in ring_buffer_resize()
2979 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2985 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2986 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2987 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2989 err = -ENOMEM; in ring_buffer_resize()
3001 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3002 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3003 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
3006 /* Can't run something on an offline CPU. */ in ring_buffer_resize()
3007 if (!cpu_online(cpu)) { in ring_buffer_resize()
3009 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3013 if (cpu != smp_processor_id()) { in ring_buffer_resize()
3015 schedule_work_on(cpu, in ring_buffer_resize()
3016 &cpu_buffer->update_pages_work); in ring_buffer_resize()
3018 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
3025 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3026 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3027 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
3030 if (cpu_online(cpu)) in ring_buffer_resize()
3031 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
3032 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3036 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
3038 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
3046 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
3047 err = -EBUSY; in ring_buffer_resize()
3051 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
3052 cpu_buffer->nr_pages; in ring_buffer_resize()
3054 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
3055 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
3056 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
3057 &cpu_buffer->new_pages)) { in ring_buffer_resize()
3058 err = -ENOMEM; in ring_buffer_resize()
3062 /* Can't run something on an offline CPU. */ in ring_buffer_resize()
3074 &cpu_buffer->update_pages_work); in ring_buffer_resize()
3075 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
3079 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3090 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
3091 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
3099 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3103 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
3106 atomic_dec(&buffer->resizing); in ring_buffer_resize()
3107 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
3111 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3114 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3115 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3117 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
3120 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
3122 list_del_init(&bpage->list); in ring_buffer_resize()
3127 atomic_dec(&buffer->resizing); in ring_buffer_resize()
3128 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
3135 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
3137 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
3139 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
3140 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
3146 return bpage->page->data + index; in __rb_page_index()
3152 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
3153 cpu_buffer->reader_page->read); in rb_reader_event()
3160 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
3164 if (iter->head != iter->next_event) in rb_iter_head_event()
3165 return iter->event; in rb_iter_head_event()
3176 if (iter->head > commit - 8) in rb_iter_head_event()
3179 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
3188 if ((iter->head + length) > commit || length > iter->event_size) in rb_iter_head_event()
3189 /* Writer corrupted the read? */ in rb_iter_head_event()
3192 memcpy(iter->event, event, length); in rb_iter_head_event()
3199 /* Make sure the page didn't change since we read this */ in rb_iter_head_event()
3200 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
3204 iter->next_event = iter->head + length; in rb_iter_head_event()
3205 return iter->event; in rb_iter_head_event()
3208 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
3209 iter->head = 0; in rb_iter_head_event()
3210 iter->next_event = 0; in rb_iter_head_event()
3211 iter->missed_events = 1; in rb_iter_head_event()
3224 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
3232 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
3234 return addr - BUF_PAGE_HDR_SIZE; in rb_event_index()
3239 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
3247 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
3248 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
3250 rb_inc_page(&iter->head_page); in rb_inc_iter()
3252 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
3253 iter->head = 0; in rb_inc_iter()
3254 iter->next_event = 0; in rb_inc_iter()
3257 /* Return the index into the sub-buffers for a given sub-buffer */
3262 subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs; in rb_meta_subbuf_idx()
3263 subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size); in rb_meta_subbuf_idx()
3264 return (subbuf - subbuf_array) / meta->subbuf_size; in rb_meta_subbuf_idx()
3270 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_head()
3271 unsigned long old_head = (unsigned long)next_page->page; in rb_update_meta_head()
3275 new_head = (unsigned long)next_page->page; in rb_update_meta_head()
3281 (void)cmpxchg(&meta->head_buffer, old_head, new_head); in rb_update_meta_head()
3287 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_reader()
3288 void *old_reader = cpu_buffer->reader_page->page; in rb_update_meta_reader()
3289 void *new_reader = reader->page; in rb_update_meta_reader()
3292 id = reader->id; in rb_update_meta_reader()
3293 cpu_buffer->reader_page->id = id; in rb_update_meta_reader()
3294 reader->id = 0; in rb_update_meta_reader()
3296 meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader); in rb_update_meta_reader()
3297 meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader); in rb_update_meta_reader()
3304 * rb_handle_head_page - writer hit the head page
3308 * -1 on error
3332 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
3333 * HEAD - we are the first to get here. in rb_handle_head_page()
3334 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
3336 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
3348 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
3349 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
3350 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
3352 if (cpu_buffer->ring_meta) in rb_handle_head_page()
3377 * The reader is on another CPU and just did in rb_handle_head_page()
3384 return -1; in rb_handle_head_page()
3391 * The reader (on another CPU) will spin till in rb_handle_head_page()
3409 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
3410 * NORMAL - One of two things: in rb_handle_head_page()
3422 return -1; in rb_handle_head_page()
3438 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
3461 return -1; in rb_handle_head_page()
3471 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
3472 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
3474 unsigned long length = info->length; in rb_reset_tail()
3487 tail_page->real_end = 0; in rb_reset_tail()
3489 local_sub(length, &tail_page->write); in rb_reset_tail()
3500 tail_page->real_end = tail; in rb_reset_tail()
3514 if (tail > (bsize - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
3524 local_sub(length, &tail_page->write); in rb_reset_tail()
3529 event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
3530 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
3532 event->time_delta = 1; in rb_reset_tail()
3535 local_add(bsize - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
3537 /* Make sure the padding is visible before the tail_page->write update */ in rb_reset_tail()
3541 length = (tail + length) - bsize; in rb_reset_tail()
3542 local_sub(length, &tail_page->write); in rb_reset_tail()
3554 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
3555 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
3556 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
3570 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3578 * could be on another CPU trying to swap its reader in rb_move_tail()
3588 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
3594 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
3599 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
3600 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
3622 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
3623 cpu_buffer->tail_page) && in rb_move_tail()
3624 (cpu_buffer->commit_page == in rb_move_tail()
3625 cpu_buffer->reader_page))) { in rb_move_tail()
3626 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3641 local_inc(&cpu_buffer->committing); in rb_move_tail()
3644 return ERR_PTR(-EAGAIN); in rb_move_tail()
3659 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
3661 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
3665 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
3666 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
3669 event->time_delta = 0; in rb_add_time_stamp()
3670 event->array[0] = 0; in rb_add_time_stamp()
3690 (unsigned long long)info->delta, in rb_check_timestamp()
3691 (unsigned long long)info->ts, in rb_check_timestamp()
3692 (unsigned long long)info->before, in rb_check_timestamp()
3693 (unsigned long long)info->after, in rb_check_timestamp()
3694 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), in rb_check_timestamp()
3708 bool abs = info->add_timestamp & in rb_add_timestamp()
3711 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
3716 if (abs && (info->ts & TS_MSB)) { in rb_add_timestamp()
3717 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
3720 } else if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
3730 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
3731 info->before, info->ts); in rb_add_timestamp()
3736 info->delta = 0; in rb_add_timestamp()
3738 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); in rb_add_timestamp()
3739 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
3744 * rb_update_event - update event type and data
3745 * @cpu_buffer: The per cpu buffer of the @event
3759 unsigned length = info->length; in rb_update_event()
3760 u64 delta = info->delta; in rb_update_event()
3761 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
3764 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
3770 if (unlikely(info->add_timestamp)) in rb_update_event()
3773 event->time_delta = delta; in rb_update_event()
3774 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
3776 event->type_len = 0; in rb_update_event()
3777 event->array[0] = length; in rb_update_event()
3779 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
3825 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3827 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3833 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
3835 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
3849 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3869 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { in rb_try_to_discard()
3871 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3882 local_inc(&cpu_buffer->committing); in rb_start_commit()
3883 local_inc(&cpu_buffer->commits); in rb_start_commit()
3892 * We only race with interrupts and NMIs on this CPU. in rb_set_commit_to_write()
3900 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3902 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3903 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3906 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3912 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3913 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3914 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3915 if (cpu_buffer->ring_meta) { in rb_set_commit_to_write()
3916 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_set_commit_to_write()
3917 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page; in rb_set_commit_to_write()
3923 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3927 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3928 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3930 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3943 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3952 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3956 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3959 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3962 local_dec(&cpu_buffer->committing); in rb_end_commit()
3972 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3973 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3974 local_inc(&cpu_buffer->committing); in rb_end_commit()
3985 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
3986 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
3988 if (!event->time_delta) in rb_event_discard()
3989 event->time_delta = 1; in rb_event_discard()
3994 local_inc(&cpu_buffer->entries); in rb_commit()
4001 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
4002 buffer->irq_work.waiters_pending = false; in rb_wakeups()
4004 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
4007 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
4008 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
4010 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
4013 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
4016 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
4019 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
4022 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
4024 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
4027 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
4028 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
4030 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
4067 * 101 - 1 = 100
4070 * 1010 - 1 = 1001
4105 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
4108 bit = RB_CTX_NORMAL - bit; in trace_recursive_lock()
4110 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
4117 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
4123 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
4124 cpu_buffer->current_context = val; in trace_recursive_lock()
4132 cpu_buffer->current_context &= in trace_recursive_unlock()
4133 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
4140 * ring_buffer_nest_start - Allow tracing while nested
4155 int cpu; in ring_buffer_nest_start() local
4159 cpu = raw_smp_processor_id(); in ring_buffer_nest_start()
4160 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
4162 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
4166 * ring_buffer_nest_end - End the nesting started by ring_buffer_nest_start()
4175 int cpu; in ring_buffer_nest_end() local
4178 cpu = raw_smp_processor_id(); in ring_buffer_nest_end()
4179 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
4181 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
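/*
 * A minimal usage sketch (not part of ring_buffer.c): writing a second,
 * nested event from code that is already between ring_buffer_lock_reserve()
 * and ring_buffer_unlock_commit() on the same buffer. Assumes the
 * declarations from <linux/ring_buffer.h>; the u64 payload is illustrative.
 */
static void example_nested_write(struct trace_buffer *buffer)
{
        struct ring_buffer_event *event;
        u64 *payload;

        /* Tell the recursion protection that a nested write is intentional */
        ring_buffer_nest_start(buffer);

        event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
        if (event) {
                payload = ring_buffer_event_data(event);
                *payload = 0xbeef;              /* illustrative data */
                ring_buffer_unlock_commit(buffer);
        }

        /* Must come after the nested ring_buffer_unlock_commit() */
        ring_buffer_nest_end(buffer);
}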
4186 * ring_buffer_unlock_commit - commit a reserved event
4196 int cpu = raw_smp_processor_id(); in ring_buffer_unlock_commit() local
4198 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
4239 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) in show_flags()
4244 if (entry->flags & TRACE_FLAG_SOFTIRQ) in show_flags()
4247 if (entry->flags & TRACE_FLAG_HARDIRQ) in show_flags()
4250 if (entry->flags & TRACE_FLAG_NMI) in show_flags()
4260 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) in show_irq()
4264 if (entry->flags & TRACE_FLAG_IRQS_OFF) in show_irq()
4294 ts = bpage->time_stamp; in dump_buffer_page()
4299 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
4301 switch (event->type_len) { in dump_buffer_page()
4318 ts += event->time_delta; in dump_buffer_page()
4320 e, ts, event->time_delta); in dump_buffer_page()
4324 ts += event->time_delta; in dump_buffer_page()
4326 e, ts, event->time_delta, in dump_buffer_page()
4347 atomic_inc(&cpu_buffer->record_disabled); \
4353 /* Do not re-enable checking */ \
4370 bpage = info->tail_page->page; in check_buffer()
4374 tail = local_read(&bpage->commit); in check_buffer()
4375 } else if (info->add_timestamp & in check_buffer()
4385 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
4394 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4397 buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n", in check_buffer()
4398 cpu_buffer->cpu, ts, delta); in check_buffer()
4402 if ((full && ts > info->ts) || in check_buffer()
4403 (!full && ts + info->delta != info->ts)) { in check_buffer()
4404 …buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld … in check_buffer()
4405 cpu_buffer->cpu, in check_buffer()
4406 ts + info->delta, info->ts, info->delta, in check_buffer()
4407 info->before, info->after, in check_buffer()
4429 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
4430 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
4432 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
4434 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4435 rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4437 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4439 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
4440 info->delta = info->ts; in __rb_reserve_next()
4448 /* Use the sub-buffer timestamp */ in __rb_reserve_next()
4449 info->delta = 0; in __rb_reserve_next()
4450 } else if (unlikely(info->before != info->after)) { in __rb_reserve_next()
4451 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
4452 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
4454 info->delta = info->ts - info->after; in __rb_reserve_next()
4455 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
4456 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
4457 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
4462 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
4464 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
4469 tail = write - info->length; in __rb_reserve_next()
4472 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
4479 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
4486 if (likely(!(info->add_timestamp & in __rb_reserve_next()
4489 info->delta = info->ts - info->after; in __rb_reserve_next()
4492 info->delta = info->ts; in __rb_reserve_next()
4496 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
4499 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4502 * Read a new timestamp and update the before_stamp to make in __rb_reserve_next()
4507 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4508 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
4511 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4513 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
4514 info->after == info->before && info->after < ts) { in __rb_reserve_next()
4517 * safe to use info->after for the delta as it in __rb_reserve_next()
4518 * matched info->before and is still valid. in __rb_reserve_next()
4520 info->delta = ts - info->after; in __rb_reserve_next()
4530 info->delta = 0; in __rb_reserve_next()
4532 info->ts = ts; in __rb_reserve_next()
4533 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
4540 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
4542 info->delta = 0; in __rb_reserve_next()
4549 local_inc(&tail_page->entries); in __rb_reserve_next()
4556 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
4559 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
4590 * Due to the ability to swap a cpu buffer from a buffer in rb_reserve_next_event()
4596 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
4597 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
4598 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
4605 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
4608 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
4632 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
4634 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
4646 * ring_buffer_lock_reserve - reserve a part of the buffer
4665 int cpu; in ring_buffer_lock_reserve() local
4670 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
4673 cpu = raw_smp_processor_id(); in ring_buffer_lock_reserve()
4675 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
4678 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4680 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
4683 if (unlikely(length > buffer->max_data_size)) in ring_buffer_lock_reserve()
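/*
 * A minimal usage sketch (not part of ring_buffer.c): the reserve/commit
 * pattern documented by ring_buffer_lock_reserve() and
 * ring_buffer_unlock_commit(). Preemption stays disabled between the two
 * calls, so the body must not sleep. The u64 payload is illustrative.
 */
static int example_reserve_commit(struct trace_buffer *buffer, u64 value)
{
        struct ring_buffer_event *event;
        u64 *body;

        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
                return -EBUSY;          /* recording disabled or no space */

        body = ring_buffer_event_data(event);
        *body = value;

        /* In this version of the API the commit needs only the buffer */
        ring_buffer_unlock_commit(buffer);
        return 0;
}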
4714 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
4717 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
4720 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
4721 local_dec(&bpage->entries); in rb_decrement_entry()
4732 if (bpage->page == (void *)addr) { in rb_decrement_entry()
4733 local_dec(&bpage->entries); in rb_decrement_entry()
4744 * ring_buffer_discard_commit - discard an event that has not been committed
4750 * and then that event will not be read later.
4766 int cpu; in ring_buffer_discard_commit() local
4771 cpu = smp_processor_id(); in ring_buffer_discard_commit()
4772 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4779 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
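/*
 * A minimal usage sketch (not part of ring_buffer.c): dropping a reserved
 * but not yet committed event with ring_buffer_discard_commit(), e.g. when
 * a filter decides after reserving that the event should not be kept. The
 * keep() callback and the u64 payload are illustrative assumptions.
 */
static void example_reserve_or_discard(struct trace_buffer *buffer,
                                       bool (*keep)(u64 value), u64 value)
{
        struct ring_buffer_event *event;
        u64 *body;

        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
                return;

        body = ring_buffer_event_data(event);
        *body = value;

        if (keep(value))
                ring_buffer_unlock_commit(buffer);
        else
                ring_buffer_discard_commit(buffer, event);
}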
4793 * ring_buffer_write - write data to the buffer without reserving
4812 int ret = -EBUSY; in ring_buffer_write()
4813 int cpu; in ring_buffer_write() local
4817 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
4818 return -EBUSY; in ring_buffer_write()
4820 cpu = raw_smp_processor_id(); in ring_buffer_write()
4822 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
4823 return -EBUSY; in ring_buffer_write()
4825 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
4827 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
4828 return -EBUSY; in ring_buffer_write()
4830 if (length > buffer->max_data_size) in ring_buffer_write()
4831 return -EBUSY; in ring_buffer_write()
4834 return -EBUSY; in ring_buffer_write()
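/*
 * A minimal usage sketch (not part of ring_buffer.c): a one-shot write with
 * ring_buffer_write(), which copies the data and commits it in a single call
 * instead of using the reserve/commit pair. The example_sample struct is a
 * hypothetical payload, not something defined by this file.
 */
struct example_sample {                 /* hypothetical event payload */
        u64     ts;
        u32     value;
};

static int example_one_shot_write(struct trace_buffer *buffer,
                                  const struct example_sample *sample)
{
        /* Returns 0 on success, -EBUSY if recording is disabled or it fails */
        return ring_buffer_write(buffer, sizeof(*sample), (void *)sample);
}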
4859 * the entries read from the ring buffer and the number of
4865 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4866 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4875 * ring_buffer_record_disable - stop all writes into the buffer
4885 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4890 * ring_buffer_record_enable - enable writes to the buffer
4898 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
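/*
 * A minimal usage sketch (not part of ring_buffer.c): bracketing maintenance
 * work with ring_buffer_record_disable()/ring_buffer_record_enable(). The
 * disable side is documented as needing a synchronize_rcu() before the
 * caller may assume no writer is still inside the buffer; do_maintenance()
 * is an illustrative placeholder.
 */
static void example_quiesce_writes(struct trace_buffer *buffer,
                                   void (*do_maintenance)(struct trace_buffer *))
{
        ring_buffer_record_disable(buffer);
        synchronize_rcu();              /* wait for in-flight writers */

        do_maintenance(buffer);

        ring_buffer_record_enable(buffer);
}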
4903 * ring_buffer_record_off - stop all writes into the buffer
4918 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4921 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
4926 * ring_buffer_record_on - restart writes into the buffer
4941 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4944 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
4949 * ring_buffer_record_is_on - return true if the ring buffer can write
4956 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4960 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4972 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
4976 * ring_buffer_record_is_on_cpu - return true if the ring buffer can write
4978 * @cpu: The CPU to test if the ring buffer can write to
4981 * for a particular CPU.
4983 bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_is_on_cpu() argument
4987 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_is_on_cpu()
4990 !atomic_read(&cpu_buffer->record_disabled); in ring_buffer_record_is_on_cpu()
4994 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4996 * @cpu: The CPU buffer to stop
5003 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
5007 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
5010 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
5011 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
5016 * ring_buffer_record_enable_cpu - enable writes to the buffer
5018 * @cpu: The CPU to enable.
5023 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
5027 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
5030 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
5031 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
5036 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
5038 * @cpu: The per CPU buffer to read from.
5040 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
5047 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
5050 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
5051 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
5056 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
5057 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
5061 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
5062 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
5069 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
5071 * @cpu: The per CPU buffer to read from.
5073 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
5078 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
5081 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
5082 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
5089 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
5091 * @cpu: The per CPU buffer to get the entries from.
5093 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
5097 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
5100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
5107 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
5110 * @cpu: The per CPU buffer to get the number of overruns from
5112 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
5117 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
5120 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
5121 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
5128 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
5132 * @cpu: The per CPU buffer to get the number of overruns from
5135 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
5140 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
5143 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
5144 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
5151 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
5154 * @cpu: The per CPU buffer to get the number of dropped events from
5157 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
5162 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
5165 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
5166 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
5173 * ring_buffer_read_events_cpu - get the number of events successfully read
5175 * @cpu: The per CPU buffer to get the number of events read from
5178 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
5182 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
5185 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
5186 return cpu_buffer->read; in ring_buffer_read_events_cpu()
5191 * ring_buffer_entries - get the number of entries in a buffer
5195 * (all CPU entries)
5201 int cpu; in ring_buffer_entries() local
5204 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
5205 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5214 * ring_buffer_overruns - get the number of overruns in buffer
5218 * (all CPU entries)
5224 int cpu; in ring_buffer_overruns() local
5227 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
5228 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
5229 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
5238 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
5241 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
5242 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
5243 iter->next_event = iter->head; in rb_iter_reset()
5245 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
5246 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
5247 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
5249 if (iter->head) { in rb_iter_reset()
5250 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
5251 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
5253 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
5254 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
5259 * ring_buffer_iter_reset - reset an iterator
5273 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
5275 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5277 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5282 * ring_buffer_iter_empty - check if an iterator has no more to read
5296 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
5297 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
5298 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
5299 commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5300 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
5313 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5314 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
5322 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
5323 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
5324 head_page->read == commit && in ring_buffer_iter_empty()
5325 iter->head == rb_page_size(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
5335 switch (event->type_len) { in rb_update_read_stamp()
5341 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
5346 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
5347 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
5351 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
5365 switch (event->type_len) { in rb_update_iter_read_stamp()
5371 iter->read_stamp += delta; in rb_update_iter_read_stamp()
5376 delta = rb_fix_abs_ts(delta, iter->read_stamp); in rb_update_iter_read_stamp()
5377 iter->read_stamp = delta; in rb_update_iter_read_stamp()
5381 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
5385 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
5393 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
5400 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
5414 reader = cpu_buffer->reader_page; in rb_get_reader_page()
5416 /* If there's more to read, return this page */ in rb_get_reader_page()
5417 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
5422 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
5427 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
5437 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
5438 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
5439 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
5448 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
5449 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
5452 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
5456 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
5459 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
5462 * We want to make sure we read the overruns after we set up our in rb_get_reader_page()
5471 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
5484 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
5492 if (cpu_buffer->ring_meta) in rb_get_reader_page()
5500 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
5501 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
5503 cpu_buffer->cnt++; in rb_get_reader_page()
5504 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
5507 cpu_buffer->reader_page = reader; in rb_get_reader_page()
5508 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
5510 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
5511 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
5512 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
5519 if (reader && reader->read == 0) in rb_get_reader_page()
5520 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
5522 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
5550 * if the page has not been fully filled, so the read barrier in rb_get_reader_page()
5574 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
5575 cpu_buffer->read++; in rb_advance_reader()
5580 cpu_buffer->reader_page->read += length; in rb_advance_reader()
5581 cpu_buffer->read_bytes += length; in rb_advance_reader()
5588 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
5591 if (iter->head == iter->next_event) { in rb_advance_iter()
5597 iter->head = iter->next_event; in rb_advance_iter()
5602 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
5604 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
5610 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
5615 return cpu_buffer->lost_events; in rb_lost_events()
5644 switch (event->type_len) { in rb_buffer_peek()
5666 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); in rb_buffer_peek()
5667 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5668 cpu_buffer->cpu, ts); in rb_buffer_peek()
5676 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
5677 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5678 cpu_buffer->cpu, ts); in rb_buffer_peek()
5703 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
5704 buffer = cpu_buffer->buffer; in rb_iter_peek()
5707 * Check if someone performed a consuming read to the buffer in rb_iter_peek()
5711 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
5712 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
5713 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
5722 * to read, just give up if we fail to get an event after in rb_iter_peek()
5733 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
5742 switch (event->type_len) { in rb_iter_peek()
5759 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); in rb_iter_peek()
5760 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
5761 cpu_buffer->cpu, ts); in rb_iter_peek()
5769 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
5771 cpu_buffer->cpu, ts); in rb_iter_peek()
5786 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
5795 * to do the read, but this can corrupt the ring buffer, in rb_reader_lock()
5799 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
5803 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
5811 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
5815 * ring_buffer_peek - peek at the next event to be read
5816 * @buffer: The ring buffer to read
5817 * @cpu: The cpu to peek at
5821 * This will return the event that will be read next, but does
5825 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
5828 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5833 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5840 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
5845 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
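/*
 * A minimal usage sketch (not part of ring_buffer.c): inspecting the next
 * event on one CPU with ring_buffer_peek() without consuming it, using the
 * four-argument form shown above (buffer, cpu, timestamp, lost-event count).
 */
static bool example_peek_next(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost = 0;
        u64 ts = 0;

        event = ring_buffer_peek(buffer, cpu, &ts, &lost);
        if (!event)
                return false;           /* nothing to read on this CPU */

        /* Look at the event but leave it in place for the real consumer */
        pr_debug("next event: %u bytes at %llu (%lu lost)\n",
                 ring_buffer_event_length(event), ts, lost);
        return true;
}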
5851 /** ring_buffer_iter_dropped - report if there are dropped events
5858 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
5860 iter->missed_events = 0; in ring_buffer_iter_dropped()
5866 * ring_buffer_iter_peek - peek at the next event to be read
5870 * This will return the event that will be read next, but does
5876 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5881 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5883 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5885 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
5892 * ring_buffer_consume - return an event and consume it
5894 * @cpu: the cpu to read the buffer from
5903 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5915 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5918 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5924 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5934 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
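/*
 * A minimal usage sketch (not part of ring_buffer.c): draining one CPU
 * buffer with ring_buffer_consume(), which returns and consumes one event
 * per call until the buffer is empty. The handle_event() callback is an
 * illustrative placeholder for whatever the reader does with the data.
 */
static void example_drain_cpu(struct trace_buffer *buffer, int cpu,
                              void (*handle_event)(void *data, unsigned int len, u64 ts))
{
        struct ring_buffer_event *event;
        unsigned long lost = 0;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                handle_event(ring_buffer_event_data(event),
                             ring_buffer_event_length(event), ts);
                if (lost)
                        pr_debug("%lu events overwritten before this read\n", lost);
        }
}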
5942 * ring_buffer_read_start - start a non consuming read of the buffer
5943 * @buffer: The ring buffer to read from
5944 * @cpu: The cpu buffer to iterate over
5947 * This creates an iterator to allow non-consuming iteration through
5955 ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_start() argument
5960 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_start()
5968 iter->event_size = buffer->subbuf_size; in ring_buffer_read_start()
5969 iter->event = kmalloc(iter->event_size, flags); in ring_buffer_read_start()
5970 if (!iter->event) { in ring_buffer_read_start()
5975 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_start()
5977 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_start()
5979 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_start()
5981 guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock); in ring_buffer_read_start()
5982 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5984 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5991 * ring_buffer_read_finish - finish reading the iterator of the buffer
5994 * This re-enables resizing of the buffer, and frees the iterator.
5999 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
6004 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
6005 kfree(iter->event); in ring_buffer_read_finish()
6011 * ring_buffer_iter_advance - advance the iterator to the next location
6014 * Move the location of the iterator such that the next read will
6019 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
6022 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
6026 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
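/*
 * A minimal usage sketch (not part of ring_buffer.c): a non-consuming walk
 * over one CPU buffer with the iterator API, using the
 * ring_buffer_read_start(buffer, cpu, gfp) form of this version, which
 * allocates and returns the iterator directly. visit() is an illustrative
 * placeholder.
 */
static void example_iterate_cpu(struct trace_buffer *buffer, int cpu,
                                void (*visit)(struct ring_buffer_event *event, u64 ts))
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
        if (!iter)
                return;

        while (!ring_buffer_iter_empty(iter)) {
                event = ring_buffer_iter_peek(iter, &ts);
                if (!event)
                        break;
                visit(event, ts);
                ring_buffer_iter_advance(iter);
        }

        /* Re-enables resizing and frees the iterator and its event buffer */
        ring_buffer_read_finish(iter);
}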
6031 * ring_buffer_size - return the size of the ring buffer (in bytes)
6033 * @cpu: The CPU to get ring buffer size from.
6035 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
6037 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
6040 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
6045 * ring_buffer_max_event_size - return the max data size of an event
6054 return buffer->max_data_size - RB_LEN_TIME_EXTEND; in ring_buffer_max_event_size()
6055 return buffer->max_data_size; in ring_buffer_max_event_size()
6061 local_set(&page->write, 0); in rb_clear_buffer_page()
6062 local_set(&page->entries, 0); in rb_clear_buffer_page()
6063 rb_init_page(page->page); in rb_clear_buffer_page()
6064 page->read = 0; in rb_clear_buffer_page()
6092 if (cpu_buffer->ring_meta) in rb_page_id()
6093 id = rb_meta_subbuf_idx(cpu_buffer->ring_meta, bpage->page); in rb_page_id()
6095 bpage->id = id; in rb_page_id()
6102 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_update_meta_page()
6107 meta->reader.read = cpu_buffer->reader_page->read; in rb_update_meta_page()
6108 meta->reader.id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, in rb_update_meta_page()
6109 cpu_buffer->reader_page->id); in rb_update_meta_page()
6111 meta->reader.lost_events = cpu_buffer->lost_events; in rb_update_meta_page()
6113 meta->entries = local_read(&cpu_buffer->entries); in rb_update_meta_page()
6114 meta->overrun = local_read(&cpu_buffer->overrun); in rb_update_meta_page()
6115 meta->read = cpu_buffer->read; in rb_update_meta_page()
6117 /* Some archs do not have data cache coherency between kernel and user-space */ in rb_update_meta_page()
6118 flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE); in rb_update_meta_page()
6128 cpu_buffer->head_page in rb_reset_cpu()
6129 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
6130 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
6131 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
6135 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
6136 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
6138 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
6139 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
6140 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
6142 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
6143 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
6144 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
6145 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
6146 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
6147 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
6148 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
6149 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
6150 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
6151 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
6152 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
6153 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
6154 cpu_buffer->read = 0; in rb_reset_cpu()
6155 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
6157 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
6158 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
6160 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
6162 cpu_buffer->lost_events = 0; in rb_reset_cpu()
6163 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
6166 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
6168 if (cpu_buffer->mapped) { in rb_reset_cpu()
6170 if (cpu_buffer->ring_meta) { in rb_reset_cpu()
6171 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_reset_cpu()
6172 meta->commit_buffer = meta->head_buffer; in rb_reset_cpu()
6177 /* Must have disabled the cpu buffer then done a synchronize_rcu */
6180 guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock); in reset_disabled_cpu_buffer()
6182 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
6185 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6189 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6193 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
6194 * @buffer: The ring buffer to reset a per cpu buffer of
6195 * @cpu: The CPU buffer to be reset
6197 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
6199 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
6201 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
6205 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
6207 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6208 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6215 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6216 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6218 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
6226 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
6227 * @buffer: The ring buffer to reset a per cpu buffer of
6232 int cpu; in ring_buffer_reset_online_cpus() local
6235 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
6237 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
6238 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6240 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6241 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6247 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
6248 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6251 * If a CPU came online during the synchronize_rcu(), then in ring_buffer_reset_online_cpus()
6254 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
6259 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6260 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6263 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
6267 * ring_buffer_reset - reset a ring buffer
6268 * @buffer: The ring buffer to reset all cpu buffers
6273 int cpu; in ring_buffer_reset() local
6276 mutex_lock(&buffer->mutex); in ring_buffer_reset()
6278 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
6279 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6281 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6282 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
6288 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
6289 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6293 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
6294 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6297 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
6302 * ring_buffer_empty - is the ring buffer empty?
6311 int cpu; in ring_buffer_empty() local
6314 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
6315 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6331 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
6333 * @cpu: The CPU buffer to test
6335 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
6342 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
6345 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6358 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
6361 * @cpu: the CPU of the buffers to swap
6364 * of a CPU buffer and have another backup buffer lying around.
6365 * It is expected that the tracer handles the cpu buffer not being
6369 struct trace_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
6373 int ret = -EINVAL; in ring_buffer_swap_cpu()
6375 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
6376 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
6377 return -EINVAL; in ring_buffer_swap_cpu()
6379 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
6380 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
6383 if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) in ring_buffer_swap_cpu()
6384 return -EBUSY; in ring_buffer_swap_cpu()
6387 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
6388 return -EINVAL; in ring_buffer_swap_cpu()
6390 if (buffer_a->subbuf_order != buffer_b->subbuf_order) in ring_buffer_swap_cpu()
6391 return -EINVAL; in ring_buffer_swap_cpu()
6393 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
6394 return -EAGAIN; in ring_buffer_swap_cpu()
6396 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
6397 return -EAGAIN; in ring_buffer_swap_cpu()
6399 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
6400 return -EAGAIN; in ring_buffer_swap_cpu()
6402 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
6403 return -EAGAIN; in ring_buffer_swap_cpu()
6408 * Normally this will be called from the same CPU as cpu. in ring_buffer_swap_cpu()
6411 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
6412 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
6414 ret = -EBUSY; in ring_buffer_swap_cpu()
6415 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
6417 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
6422 * it will mess the state of the cpu buffer. in ring_buffer_swap_cpu()
6424 if (atomic_read(&buffer_a->resizing)) in ring_buffer_swap_cpu()
6426 if (atomic_read(&buffer_b->resizing)) in ring_buffer_swap_cpu()
6429 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
6430 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
6432 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
6433 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
6438 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
6439 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
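/*
 * A minimal usage sketch (not part of ring_buffer.c): snapshotting one CPU
 * by swapping its buffer with a spare buffer of the same geometry via
 * ring_buffer_swap_cpu(). Both buffers must have matching page counts and
 * sub-buffer order, and swap support must be compiled in; "live" and
 * "spare" are illustrative names for buffers the caller already owns.
 */
static int example_snapshot_cpu(struct trace_buffer *live,
                                struct trace_buffer *spare, int cpu)
{
        int ret;

        ret = ring_buffer_swap_cpu(spare, live, cpu);
        if (ret)
                return ret;     /* -EINVAL, -EAGAIN or -EBUSY per the checks above */

        /* "spare" now holds what "live" had recorded for this CPU */
        return 0;
}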
6446 * ring_buffer_alloc_read_page - allocate a page to read from buffer
6448 * @cpu: the cpu buffer to allocate.
6456 * the page that was allocated, with the read page of the buffer.
6462 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
6469 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
6470 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
6474 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
6476 bpage->order = buffer->subbuf_order; in ring_buffer_alloc_read_page()
6477 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6479 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6481 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
6482 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
6483 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
6486 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6489 if (bpage->data) in ring_buffer_alloc_read_page()
6492 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
6494 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6497 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
6500 bpage->data = page_address(page); in ring_buffer_alloc_read_page()
6503 rb_init_page(bpage->data); in ring_buffer_alloc_read_page()
6510 * ring_buffer_free_read_page - free an allocated read page
6512 * @cpu: the cpu buffer the page came from
6517 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, in ring_buffer_free_read_page() argument
6521 struct buffer_data_page *bpage = data_page->data; in ring_buffer_free_read_page()
6525 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
6528 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6532 * is different from the subbuffer order of the buffer - in ring_buffer_free_read_page()
6535 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order) in ring_buffer_free_read_page()
6539 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6541 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
6542 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
6546 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6550 free_pages((unsigned long)bpage, data_page->order); in ring_buffer_free_read_page()
6556 * ring_buffer_read_page - extract a page from the ring buffer
6560 * @cpu: the cpu of the buffer to extract
6569 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
6572 * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
6575 * ring_buffer_free_read_page(buffer, cpu, rpage);
6591 size_t len, int cpu, int full) in ring_buffer_read_page() argument
6593 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6599 unsigned int read; in ring_buffer_read_page() local
6602 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
6603 return -1; in ring_buffer_read_page()
6610 return -1; in ring_buffer_read_page()
6612 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
6614 if (!data_page || !data_page->data) in ring_buffer_read_page()
6615 return -1; in ring_buffer_read_page()
6617 if (data_page->order != buffer->subbuf_order) in ring_buffer_read_page()
6618 return -1; in ring_buffer_read_page()
6620 bpage = data_page->data; in ring_buffer_read_page()
6622 return -1; in ring_buffer_read_page()
6624 guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock); in ring_buffer_read_page()
6628 return -1; in ring_buffer_read_page()
6632 read = reader->read; in ring_buffer_read_page()
6636 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
6639 * If this page has been partially read or in ring_buffer_read_page()
6640 * if len is not big enough to read the rest of the page or in ring_buffer_read_page()
6645 if (read || (len < (commit - read)) || in ring_buffer_read_page()
6646 cpu_buffer->reader_page == cpu_buffer->commit_page || in ring_buffer_read_page()
6647 cpu_buffer->mapped) { in ring_buffer_read_page()
6648 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
6649 unsigned int rpos = read; in ring_buffer_read_page()
6655 * if there's been a previous partial read and the in ring_buffer_read_page()
6656 * rest of the page can be read and the commit page is off in ring_buffer_read_page()
6660 (!read || (len < (commit - read)) || in ring_buffer_read_page()
6661 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
6662 return -1; in ring_buffer_read_page()
6664 if (len > (commit - read)) in ring_buffer_read_page()
6665 len = (commit - read); in ring_buffer_read_page()
6671 return -1; in ring_buffer_read_page()
6674 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
6685 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
6687 len -= size; in ring_buffer_read_page()
6690 rpos = reader->read; in ring_buffer_read_page()
6702 local_set(&bpage->commit, pos); in ring_buffer_read_page()
6703 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
6706 read = 0; in ring_buffer_read_page()
6709 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
6710 cpu_buffer->read_bytes += rb_page_size(reader); in ring_buffer_read_page()
6714 bpage = reader->page; in ring_buffer_read_page()
6715 reader->page = data_page->data; in ring_buffer_read_page()
6716 local_set(&reader->write, 0); in ring_buffer_read_page()
6717 local_set(&reader->entries, 0); in ring_buffer_read_page()
6718 reader->read = 0; in ring_buffer_read_page()
6719 data_page->data = bpage; in ring_buffer_read_page()
6726 if (reader->real_end) in ring_buffer_read_page()
6727 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
6730 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
6732 commit = local_read(&bpage->commit); in ring_buffer_read_page()
6740 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
6741 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
6743 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
6746 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
6752 if (commit < buffer->subbuf_size) in ring_buffer_read_page()
6753 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); in ring_buffer_read_page()
6755 return read; in ring_buffer_read_page()
6760 * ring_buffer_read_page_data - get pointer to the data in the page.
6767 return page->data; in ring_buffer_read_page_data()
6772 * ring_buffer_subbuf_size_get - get size of the sub buffer.
6779 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_size_get()
6784 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
6798 return -EINVAL; in ring_buffer_subbuf_order_get()
6800 return buffer->subbuf_order; in ring_buffer_subbuf_order_get()
6805 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
6813 * 0 - 1 system page
6814 * 1 - 2 system pages
6815 * 2 - 4 system pages
6828 int cpu; in ring_buffer_subbuf_order_set() local
6831 return -EINVAL; in ring_buffer_subbuf_order_set()
6833 if (buffer->subbuf_order == order) in ring_buffer_subbuf_order_set()
6838 return -EINVAL; in ring_buffer_subbuf_order_set()
6842 return -EINVAL; in ring_buffer_subbuf_order_set()
6844 old_order = buffer->subbuf_order; in ring_buffer_subbuf_order_set()
6845 old_size = buffer->subbuf_size; in ring_buffer_subbuf_order_set()
6848 guard(mutex)(&buffer->mutex); in ring_buffer_subbuf_order_set()
6849 atomic_inc(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6854 buffer->subbuf_order = order; in ring_buffer_subbuf_order_set()
6855 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_order_set()
6858 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6860 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6863 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6865 if (cpu_buffer->mapped) { in ring_buffer_subbuf_order_set()
6866 err = -EBUSY; in ring_buffer_subbuf_order_set()
6871 nr_pages = old_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_subbuf_order_set()
6872 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size); in ring_buffer_subbuf_order_set()
6878 cpu_buffer->nr_pages_to_update = nr_pages; in ring_buffer_subbuf_order_set()
6884 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6886 &cpu_buffer->new_pages)) { in ring_buffer_subbuf_order_set()
6888 err = -ENOMEM; in ring_buffer_subbuf_order_set()
6893 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6898 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6901 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6903 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6905 /* Clear the head bit to make the link list normal to read */ in ring_buffer_subbuf_order_set()
6915 list_add(&old_pages, cpu_buffer->pages); in ring_buffer_subbuf_order_set()
6916 list_add(&cpu_buffer->reader_page->list, &old_pages); in ring_buffer_subbuf_order_set()
6919 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, in ring_buffer_subbuf_order_set()
6921 list_del_init(&cpu_buffer->reader_page->list); in ring_buffer_subbuf_order_set()
6924 cpu_buffer->pages = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
6925 list_del_init(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6926 cpu_buffer->cnt++; in ring_buffer_subbuf_order_set()
6928 cpu_buffer->head_page in ring_buffer_subbuf_order_set()
6929 = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
6930 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in ring_buffer_subbuf_order_set()
6932 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; in ring_buffer_subbuf_order_set()
6933 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_subbuf_order_set()
6935 old_free_data_page = cpu_buffer->free_page; in ring_buffer_subbuf_order_set()
6936 cpu_buffer->free_page = NULL; in ring_buffer_subbuf_order_set()
6940 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6944 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
6952 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6957 buffer->subbuf_order = old_order; in ring_buffer_subbuf_order_set()
6958 buffer->subbuf_size = old_size; in ring_buffer_subbuf_order_set()
6960 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6962 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6965 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_subbuf_order_set()
6968 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
6969 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
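/*
 * A minimal usage sketch (not part of ring_buffer.c): changing the
 * sub-buffer size with ring_buffer_subbuf_order_set(). The order is a power
 * of two of system pages (order 2 == 4 pages), as listed in the comment
 * above, and the call fails with -EBUSY while a CPU buffer is memory-mapped.
 */
static int example_grow_subbufs(struct trace_buffer *buffer)
{
        int old_order = ring_buffer_subbuf_order_get(buffer);
        int err;

        if (old_order < 0)
                return old_order;

        err = ring_buffer_subbuf_order_set(buffer, 2); /* 4 system pages each */
        if (err)
                return err;

        pr_debug("sub-buffer size is now %d bytes\n",
                 ring_buffer_subbuf_size_get(buffer));
        return 0;
}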
6982 if (cpu_buffer->meta_page) in rb_alloc_meta_page()
6987 return -ENOMEM; in rb_alloc_meta_page()
6989 cpu_buffer->meta_page = page_to_virt(page); in rb_alloc_meta_page()
6996 unsigned long addr = (unsigned long)cpu_buffer->meta_page; in rb_free_meta_page()
6999 cpu_buffer->meta_page = NULL; in rb_free_meta_page()
7005 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_setup_ids_meta_page()
7006 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1; in rb_setup_ids_meta_page()
7011 id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, id); in rb_setup_ids_meta_page()
7012 subbuf_ids[id++] = (unsigned long)cpu_buffer->reader_page->page; in rb_setup_ids_meta_page()
7022 subbuf_ids[id] = (unsigned long)subbuf->page; in rb_setup_ids_meta_page()
7032 cpu_buffer->subbuf_ids = subbuf_ids; in rb_setup_ids_meta_page()
7034 meta->meta_struct_len = sizeof(*meta); in rb_setup_ids_meta_page()
7035 meta->nr_subbufs = nr_subbufs; in rb_setup_ids_meta_page()
7036 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_setup_ids_meta_page()
7037 meta->meta_page_size = meta->subbuf_size; in rb_setup_ids_meta_page()
7043 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu) in rb_get_mapped_buffer() argument
7047 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in rb_get_mapped_buffer()
7048 return ERR_PTR(-EINVAL); in rb_get_mapped_buffer()
7050 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
7052 mutex_lock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
7054 if (!cpu_buffer->user_mapped) { in rb_get_mapped_buffer()
7055 mutex_unlock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
7056 return ERR_PTR(-ENODEV); in rb_get_mapped_buffer()
7064 mutex_unlock(&cpu_buffer->mapping_lock); in rb_put_mapped_buffer()
7068 * Fast-path for rb_buffer_(un)map(). Called whenever the meta-page doesn't need
7069 * to be set up or torn down.
7076 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_inc_dec_mapped()
7079 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped)) in __rb_inc_dec_mapped()
7080 return -EINVAL; in __rb_inc_dec_mapped()
7082 if (inc && cpu_buffer->mapped == UINT_MAX) in __rb_inc_dec_mapped()
7083 return -EBUSY; in __rb_inc_dec_mapped()
7085 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0)) in __rb_inc_dec_mapped()
7086 return -EINVAL; in __rb_inc_dec_mapped()
7088 mutex_lock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7089 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
7092 cpu_buffer->user_mapped++; in __rb_inc_dec_mapped()
7093 cpu_buffer->mapped++; in __rb_inc_dec_mapped()
7095 cpu_buffer->user_mapped--; in __rb_inc_dec_mapped()
7096 cpu_buffer->mapped--; in __rb_inc_dec_mapped()
7099 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
7100 mutex_unlock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7106 * +--------------+ pgoff == 0
7108 * +--------------+ pgoff == 1
7111 * +--------------+ pgoff == (1 + (1 << subbuf_order))
7120 unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff; in __rb_map_vma()
7127 if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC || in __rb_map_vma()
7128 !(vma->vm_flags & VM_MAYSHARE)) in __rb_map_vma()
7129 return -EPERM; in __rb_map_vma()
7131 subbuf_order = cpu_buffer->buffer->subbuf_order; in __rb_map_vma()
7135 return -EINVAL; in __rb_map_vma()
7144 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_map_vma()
7146 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */ in __rb_map_vma()
7147 nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */ in __rb_map_vma()
7149 return -EINVAL; in __rb_map_vma()
7151 nr_pages -= pgoff; in __rb_map_vma()
7155 return -EINVAL; in __rb_map_vma()
7161 return -ENOMEM; in __rb_map_vma()
7166 pages[p++] = virt_to_page(cpu_buffer->meta_page); in __rb_map_vma()
7169 * Pad with the zero-page to align the meta-page with the in __rb_map_vma()
7170 * sub-buffers. in __rb_map_vma()
7172 meta_page_padding = subbuf_pages - 1; in __rb_map_vma()
7173 while (meta_page_padding-- && p < nr_pages) { in __rb_map_vma()
7175 vma->vm_start + (PAGE_SIZE * p); in __rb_map_vma()
7180 /* Skip the meta-page */ in __rb_map_vma()
7181 pgoff -= subbuf_pages; in __rb_map_vma()
7191 return -EINVAL; in __rb_map_vma()
7193 page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]); in __rb_map_vma()
7204 err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages); in __rb_map_vma()
7212 return -EOPNOTSUPP; in __rb_map_vma()
7216 int ring_buffer_map(struct trace_buffer *buffer, int cpu, in ring_buffer_map() argument
7223 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_map()
7224 return -EINVAL; in ring_buffer_map()
7226 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7228 guard(mutex)(&cpu_buffer->mapping_lock); in ring_buffer_map()
7230 if (cpu_buffer->user_mapped) { in ring_buffer_map()
7237 /* prevent another thread from changing buffer/sub-buffer sizes */ in ring_buffer_map()
7238 guard(mutex)(&buffer->mutex); in ring_buffer_map()
7245 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL); in ring_buffer_map()
7248 return -ENOMEM; in ring_buffer_map()
7251 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_map()
7257 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7260 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7264 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7266 cpu_buffer->mapped++; in ring_buffer_map()
7267 cpu_buffer->user_mapped = 1; in ring_buffer_map()
7268 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7270 kfree(cpu_buffer->subbuf_ids); in ring_buffer_map()
7271 cpu_buffer->subbuf_ids = NULL; in ring_buffer_map()
7273 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_map()
7279 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu) in ring_buffer_unmap() argument
7284 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_unmap()
7285 return -EINVAL; in ring_buffer_unmap()
7287 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7289 guard(mutex)(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7291 if (!cpu_buffer->user_mapped) { in ring_buffer_unmap()
7292 return -ENODEV; in ring_buffer_unmap()
7293 } else if (cpu_buffer->user_mapped > 1) { in ring_buffer_unmap()
7298 guard(mutex)(&buffer->mutex); in ring_buffer_unmap()
7299 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7302 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped)) in ring_buffer_unmap()
7303 cpu_buffer->mapped--; in ring_buffer_unmap()
7304 cpu_buffer->user_mapped = 0; in ring_buffer_unmap()
7306 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7308 kfree(cpu_buffer->subbuf_ids); in ring_buffer_unmap()
7309 cpu_buffer->subbuf_ids = NULL; in ring_buffer_unmap()
7311 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_unmap()
7316 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) in ring_buffer_map_get_reader() argument
7324 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); in ring_buffer_map_get_reader()
7328 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7334 reader_size = rb_page_size(cpu_buffer->reader_page); in ring_buffer_map_get_reader()
7337 * There is data to be read on the current reader page; we can in ring_buffer_map_get_reader()
7338 * return to the caller. But before that, we assume the caller will read in ring_buffer_map_get_reader()
7339 * what is in the reader page anyway, so advance it. in ring_buffer_map_get_reader()
7341 if (cpu_buffer->reader_page->read < reader_size) { in ring_buffer_map_get_reader()
7342 while (cpu_buffer->reader_page->read < reader_size) in ring_buffer_map_get_reader()
7348 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in ring_buffer_map_get_reader()
7356 missed_events = cpu_buffer->lost_events; in ring_buffer_map_get_reader()
7359 if (cpu_buffer->reader_page != cpu_buffer->commit_page) { in ring_buffer_map_get_reader()
7360 struct buffer_data_page *bpage = reader->page; in ring_buffer_map_get_reader()
7367 if (reader->real_end) in ring_buffer_map_get_reader()
7368 local_set(&bpage->commit, reader->real_end); in ring_buffer_map_get_reader()
7374 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_map_get_reader()
7375 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_map_get_reader()
7377 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_map_get_reader()
7379 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_map_get_reader()
7380 } else if (!WARN_ONCE(cpu_buffer->reader_page == cpu_buffer->tail_page, in ring_buffer_map_get_reader()
7395 cpu, missed_events, cpu_buffer->reader_page->page->time_stamp); in ring_buffer_map_get_reader()
7399 cpu_buffer->lost_events = 0; in ring_buffer_map_get_reader()
7404 /* Some archs do not have data cache coherency between kernel and user-space */ in ring_buffer_map_get_reader()
7405 flush_kernel_vmap_range(cpu_buffer->reader_page->page, in ring_buffer_map_get_reader()
7406 buffer->subbuf_size + BUF_PAGE_HDR_SIZE); in ring_buffer_map_get_reader()
7410 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
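/*
 * Illustrative user-space sketch, not part of ring_buffer.c: a minimal
 * consumer step driving ring_buffer_map_get_reader() above through the
 * TRACE_MMAP_IOCTL_GET_READER ioctl on the mapped trace_pipe_raw fd.
 * Struct and macro names are assumed to come from <linux/trace_mmap.h>;
 * "map" is the start of a mapping like the one in the previous sketch, and
 * parsing of the events inside the sub-buffer is left out.
 */
#include <sys/ioctl.h>
#include <linux/trace_mmap.h>

static void *rb_example_get_reader(int fd, void *map,
				   struct trace_buffer_meta *meta)
{
	/* Swap in (or flush) the reader sub-buffer; see the kernel side above */
	if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
		return NULL;

	/*
	 * meta->reader.id names the current reader sub-buffer,
	 * meta->reader.read how many bytes of it were already consumed, and
	 * meta->reader.lost_events how many events were dropped meanwhile.
	 */
	return (char *)map + meta->meta_page_size +
	       (size_t)meta->reader.id * meta->subbuf_size;
}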
7417 * We only allocate new buffers, never free them if the CPU goes down.
7421 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) in trace_rb_cpu_prepare() argument
7429 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
7434 /* check if all per-CPU buffer sizes are the same */ in trace_rb_cpu_prepare()
7436 /* fill in the size from the first enabled cpu */ in trace_rb_cpu_prepare()
7437 if (nr_pages == 0) in trace_rb_cpu_prepare()
7438 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
7439 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
7447 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
7448 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
7449 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
7450 WARN(1, "failed to allocate ring buffer on CPU %u\n", in trace_rb_cpu_prepare()
7451 cpu); in trace_rb_cpu_prepare()
7452 return -ENOMEM; in trace_rb_cpu_prepare()
7455 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
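/*
 * Illustrative sketch, not part of ring_buffer.c: one way trace_rb_cpu_prepare()
 * above gets hooked up. The tracing core registers it once as a multi-instance
 * CPU-hotplug "prepare" callback and each allocated trace_buffer adds itself
 * as an instance, so a CPU coming online gets its per-CPU buffer allocated
 * before it can be traced; the exact call sites live outside this file.
 */
static int rb_example_hotplug_setup(struct trace_buffer *buffer)
{
	int ret;

	/* Registration of the callback (done once by the tracing core) */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare",
				      trace_rb_cpu_prepare, NULL);
	if (ret < 0)
		return ret;

	/* Done once per trace_buffer, e.g. from ring_buffer_alloc() */
	return cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
}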
7463 * It will kick off a thread per CPU that will go into a loop
7464 * writing to the per cpu ring buffer various sizes of data.
7491 int cpu; member
7497 /* 1 meg per cpu */
7523 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
7526 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
7531 /* read rb_test_started before checking buffer enabled */ in rb_write_something()
7534 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
7539 data->bytes_dropped_nested += len; in rb_write_something()
7541 data->bytes_dropped += len; in rb_write_something()
7548 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
7552 item->size = size; in rb_write_something()
7553 memcpy(item->str, rb_string, size); in rb_write_something()
7556 data->bytes_alloc_nested += event_len; in rb_write_something()
7557 data->bytes_written_nested += len; in rb_write_something()
7558 data->events_nested++; in rb_write_something()
7559 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
7560 data->min_size_nested = len; in rb_write_something()
7561 if (len > data->max_size_nested) in rb_write_something()
7562 data->max_size_nested = len; in rb_write_something()
7564 data->bytes_alloc += event_len; in rb_write_something()
7565 data->bytes_written += len; in rb_write_something()
7566 data->events++; in rb_write_something()
7567 if (!data->min_size || len < data->min_size) in rb_write_something()
7568 data->min_size = len; in rb_write_something()
7569 if (len > data->max_size) in rb_write_something()
7570 data->max_size = len; in rb_write_something()
7574 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
7585 data->cnt++; in rb_test()
7588 /* Sleep for at least 100-300us (depending on cnt) and at most 1ms */ in rb_test()
7589 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
7598 int cpu = smp_processor_id(); in rb_ipi() local
7600 data = &rb_data[cpu]; in rb_ipi()
7621 int cpu; in test_ringbuffer() local
7638 for_each_online_cpu(cpu) { in test_ringbuffer()
7639 rb_data[cpu].buffer = buffer; in test_ringbuffer()
7640 rb_data[cpu].cpu = cpu; in test_ringbuffer()
7641 rb_data[cpu].cnt = cpu; in test_ringbuffer()
7642 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu], in test_ringbuffer()
7643 cpu, "rbtester/%u"); in test_ringbuffer()
7644 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { in test_ringbuffer()
7646 ret = PTR_ERR(rb_threads[cpu]); in test_ringbuffer()
7679 for_each_online_cpu(cpu) { in test_ringbuffer()
7680 if (!rb_threads[cpu]) in test_ringbuffer()
7682 kthread_stop(rb_threads[cpu]); in test_ringbuffer()
7691 for_each_online_cpu(cpu) { in test_ringbuffer()
7693 struct rb_test_data *data = &rb_data[cpu]; in test_ringbuffer()
7707 ret = -1; in test_ringbuffer()
7709 total_events = data->events + data->events_nested; in test_ringbuffer()
7710 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
7711 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
7712 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
7714 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
7715 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
7717 pr_info("CPU %d:\n", cpu); in test_ringbuffer()
7730 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
7734 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
7735 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
7737 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
7738 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
7740 ret = -1; in test_ringbuffer()
7748 ret = -1; in test_ringbuffer()
7750 pr_info(" read events: %ld\n", total_read); in test_ringbuffer()