Lines Matching +full:array +full:- +full:nest
1 // SPDX-License-Identifier: GPL-2.0
100-141 * (ASCII art, only partially captured by the match: four diagrams showing the reader page sitting next to the ring of buffer pages, then being linked in and swapped with the head page so that the old head page becomes the new reader page)
157 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
184 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
188 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
194 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
195 event->time_delta = 0; in rb_event_set_padding()
203 if (event->type_len) in rb_event_data_length()
204 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
206 length = event->array[0]; in rb_event_data_length()
218 switch (event->type_len) { in rb_event_length()
222 return -1; in rb_event_length()
223 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
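A minimal userspace sketch of the length rule implied by lines 203-223, assuming RB_ALIGNMENT is 4 and a 4-byte event header (offsetof(struct ring_buffer_event, array)); the struct here is a simplified stand-in, not the kernel definition:

    /* Sketch: how an event's data length is derived from type_len.
     * Small events encode their size in type_len (units of RB_ALIGNMENT);
     * larger ones store the payload size in array[0]. */
    #include <stdio.h>
    #include <stdint.h>

    #define RB_ALIGNMENT     4U
    #define RB_EVNT_HDR_SIZE 4U   /* assumed: offsetof(..., array) */

    struct mini_event {
        uint32_t type_len;        /* 0 means the length lives in array[0] */
        uint32_t array[2];
    };

    static uint32_t data_length(const struct mini_event *e)
    {
        if (e->type_len)
            return e->type_len * RB_ALIGNMENT;
        return e->array[0];
    }

    int main(void)
    {
        struct mini_event small = { .type_len = 3 };                   /* 12 bytes */
        struct mini_event big   = { .type_len = 0, .array = { 200, 0 } };

        printf("small: %u data bytes, %u total\n",
               data_length(&small), data_length(&small) + RB_EVNT_HDR_SIZE);
        printf("big:   %u data bytes\n", data_length(&big));
        return 0;
    }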
258 * ring_buffer_event_length - return the length of the event
275 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
277 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
278 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
279 length -= sizeof(event->array[0]); in ring_buffer_event_length()
290 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
291 /* If length is in len field, then array[0] has the data */ in rb_event_data()
292 if (event->type_len) in rb_event_data()
293 return (void *)&event->array[0]; in rb_event_data()
294 /* Otherwise length is in array[0] and array[1] has the data */ in rb_event_data()
295 return (void *)&event->array[1]; in rb_event_data()
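The two branches at lines 292-295 place the payload differently; a small sketch of that layout, again using a simplified stand-in struct:

    /* Sketch: where the payload begins, depending on type_len.  When
     * type_len != 0 the data starts at array[0]; when type_len == 0,
     * array[0] holds the length and the data starts at array[1]. */
    #include <stdio.h>
    #include <stdint.h>

    struct mini_event {
        uint32_t type_len;
        uint32_t array[2];
    };

    static void *event_data(struct mini_event *e)
    {
        return e->type_len ? (void *)&e->array[0] : (void *)&e->array[1];
    }

    int main(void)
    {
        struct mini_event small = { .type_len = 3 };
        struct mini_event big   = { .type_len = 0 };

        printf("small payload offset: %td\n",
               (char *)event_data(&small) - (char *)&small);   /* 4 */
        printf("big   payload offset: %td\n",
               (char *)event_data(&big) - (char *)&big);       /* 8 */
        return 0;
    }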
299 * ring_buffer_event_data - return the data of the event
309 for_each_cpu(cpu, buffer->cpumask)
312 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
315 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
322 ts = event->array[0]; in rb_event_time_stamp()
324 ts += event->time_delta; in rb_event_time_stamp()
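Reading lines 322-324 together with the writer side at 3515-3516 further down, an extended timestamp is split across two fields; a self-contained sketch of that packing, with TS_SHIFT assumed to be 27 as suggested by the surrounding defines:

    /* Sketch: a time-extend delta is split into a 27-bit low part
     * (stored in time_delta) and a high part (stored in array[0]). */
    #include <stdio.h>
    #include <stdint.h>

    #define TS_SHIFT 27                      /* assumed value */
    #define TS_MASK  ((1ULL << TS_SHIFT) - 1)

    int main(void)
    {
        uint64_t delta = 123456789012ULL;    /* example delta, nanoseconds */

        /* Writer side (cf. rb_add_time_stamp): */
        uint32_t time_delta = delta & TS_MASK;
        uint32_t array0     = delta >> TS_SHIFT;

        /* Reader side (cf. rb_event_time_stamp): */
        uint64_t ts = array0;
        ts <<= TS_SHIFT;
        ts += time_delta;

        printf("round trip ok: %d\n", ts == delta);
        return 0;
    }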
384 local_set(&bpage->commit, 0); in rb_init_page()
389 return local_read(&bpage->page->commit); in rb_page_commit()
395 if (!bpage->range) in free_buffer_page()
396 free_pages((unsigned long)bpage->page, bpage->order); in free_buffer_page()
434 * EXTEND - wants a time extend
435 * ABSOLUTE - the buffer requests all events to have absolute time stamps
436 * FORCE - force a full time stamp.
493 unsigned long nest; member
600 (unsigned int)buffer->subbuf_size, in ring_buffer_print_page_header()
608 *ret = local64_read(&t->time); in rb_time_read()
612 local64_set(&t->time, val); in rb_time_set()
626 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
627 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
638 commit = local_read(&page->page->commit); in verify_event()
639 write = local_read(&page->write); in verify_event()
640 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
641 addr < (unsigned long)&page->page->data[write]) in verify_event()
644 next = rb_list_head(page->list.next); in verify_event()
678 * ring_buffer_event_time_stamp - return the event's current time stamp
697 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
698 unsigned int nest; in ring_buffer_event_time_stamp() local
702 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { in ring_buffer_event_time_stamp()
704 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
707 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
709 if (WARN_ON_ONCE(!nest)) in ring_buffer_event_time_stamp()
713 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
714 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
717 WARN_ONCE(1, "nest (%d) greater than max", nest); in ring_buffer_event_time_stamp()
720 rb_time_read(&cpu_buffer->write_stamp, &ts); in ring_buffer_event_time_stamp()
726 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
738 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
739 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
740 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
745 cnt -= lost; in ring_buffer_nr_dirty_pages()
753 return cnt - read; in ring_buffer_nr_dirty_pages()
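A hedged model of the arithmetic visible in lines 738-753: dirty pages are pages touched minus pages lost minus pages already read; the clamping to zero is an assumption inferred from the subtractions, since the intervening checks did not match:

    /* Sketch: dirty pages = touched - lost - read, never negative. */
    #include <stdio.h>

    static unsigned long nr_dirty_pages(unsigned long touched,
                                        unsigned long lost,
                                        unsigned long read)
    {
        if (touched < lost)
            return 0;
        touched -= lost;
        /* The reader may already have consumed everything written. */
        if (touched < read)
            return 0;
        return touched - read;
    }

    int main(void)
    {
        printf("%lu\n", nr_dirty_pages(10, 2, 3));  /* prints 5 */
        return 0;
    }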
758 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
762 nr_pages = cpu_buffer->nr_pages; in full_hit()
767 * Add one as dirty will never equal nr_pages, as the sub-buffer in full_hit()
777 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
787 (void)atomic_fetch_inc_release(&rbwork->seq); in rb_wake_up_waiters()
789 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
790 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { in rb_wake_up_waiters()
796 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
797 rbwork->wakeup_full = false; in rb_wake_up_waiters()
798 rbwork->full_waiters_pending = false; in rb_wake_up_waiters()
801 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
802 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
804 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
809 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
830 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
832 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
837 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
841 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
845 irq_work_queue(&rbwork->work); in ring_buffer_wake_waiters()
857 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
866 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
867 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
870 if (!ret && (!cpu_buffer->shortest_full || in rb_watermark_hit()
871 cpu_buffer->shortest_full > full)) { in rb_watermark_hit()
872 cpu_buffer->shortest_full = full; in rb_watermark_hit()
874 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
910 rbwork->full_waiters_pending = true; in rb_wait_cond()
912 rbwork->waiters_pending = true; in rb_wait_cond()
929 struct rb_irq_work *rbwork = rdata->irq_work; in rb_wait_once()
931 return atomic_read_acquire(&rbwork->seq) != rdata->seq; in rb_wait_once()
935 * ring_buffer_wait - wait for input to the ring buffer
961 rbwork = &buffer->irq_work; in ring_buffer_wait()
965 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
966 return -ENODEV; in ring_buffer_wait()
967 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
968 rbwork = &cpu_buffer->irq_work; in ring_buffer_wait()
972 waitq = &rbwork->full_waiters; in ring_buffer_wait()
974 waitq = &rbwork->waiters; in ring_buffer_wait()
980 rdata.seq = atomic_read_acquire(&rbwork->seq); in ring_buffer_wait()
991 * ring_buffer_poll_wait - poll on buffer input
1012 rbwork = &buffer->irq_work; in ring_buffer_poll_wait()
1015 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1018 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1019 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1023 poll_wait(filp, &rbwork->full_waiters, poll_table); in ring_buffer_poll_wait()
1040 rbwork->full_waiters_pending = true; in ring_buffer_poll_wait()
1044 poll_wait(filp, &rbwork->waiters, poll_table); in ring_buffer_poll_wait()
1045 rbwork->waiters_pending = true; in ring_buffer_poll_wait()
1076 atomic_inc(&__b->buffer->record_disabled); \
1078 atomic_inc(&b->record_disabled); \
1091 /* Skip retpolines :-( */ in rb_time_stamp()
1092 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1095 ts = buffer->clock(); in rb_time_stamp()
1153-1171 * (partially captured table and ASCII art: the low bits of head->list->prev->next encode the page state, and the diagram illustrates why the prev pointer of the head page cannot be trusted; Key: ---X--> means the HEAD flag is set in the pointer, T is the tail page, R the reader page, N the next page)
1201 * rb_list_head - remove any bit
1211 * rb_is_head_page - test if the given page is the head page
1223 val = (unsigned long)list->next; in rb_is_head_page()
1225 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1240 struct list_head *list = page->list.prev; in rb_is_reader_page()
1242 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
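rb_list_head() and rb_is_head_page() above rely on flag bits stored in the low bits of a list pointer; a userspace sketch of that tagging, where the flag values and the 2-bit mask are assumptions consistent with the masking seen in these lines:

    /* Sketch: state flags live in the low two bits of the ->next pointer;
     * masking them off recovers the real pointer. */
    #include <stdio.h>
    #include <stdint.h>

    #define RB_PAGE_HEAD   1UL   /* assumed flag values */
    #define RB_PAGE_UPDATE 2UL
    #define RB_FLAG_MASK   3UL

    struct node { struct node *next; };

    static struct node *rb_list_head_sketch(struct node *p)
    {
        return (struct node *)((uintptr_t)p & ~RB_FLAG_MASK);
    }

    int main(void)
    {
        struct node real = { .next = NULL };
        /* Simulate a next pointer carrying the HEAD flag. */
        struct node *tagged =
            (struct node *)((uintptr_t)&real | RB_PAGE_HEAD);

        printf("head flag set: %d\n",
               ((uintptr_t)tagged & RB_FLAG_MASK) == RB_PAGE_HEAD);
        printf("untagged ok:   %d\n", rb_list_head_sketch(tagged) == &real);
        return 0;
    }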
1246 * rb_set_list_to_head - set a list_head to be pointing to head.
1252 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1258 * rb_head_page_activate - sets up head page
1264 head = cpu_buffer->head_page; in rb_head_page_activate()
1271 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1273 if (cpu_buffer->ring_meta) { in rb_head_page_activate()
1274 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_head_page_activate()
1275 meta->head_buffer = (unsigned long)head->page; in rb_head_page_activate()
1281 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1287 * rb_head_page_deactivate - clears head page ptr (for free list)
1295 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1297 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1307 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1310 list = &prev->list; in rb_head_page_set()
1314 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1353 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1366 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1370 list = cpu_buffer->pages; in rb_set_head_page()
1371 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1374 page = head = cpu_buffer->head_page; in rb_set_head_page()
1383 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1384 cpu_buffer->head_page = page; in rb_set_head_page()
1399 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1405 return try_cmpxchg(ptr, &val, (unsigned long)&new->list); in rb_head_page_replace()
1409 * rb_tail_page_update - move the tail page forward
1427 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1428 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1441 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1456 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1457 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1464 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1467 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) in rb_tail_page_update()
1468 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1484 rb_list_head(rb_list_head(list->next)->prev) != list)) in rb_check_links()
1488 rb_list_head(rb_list_head(list->prev)->next) != list)) in rb_check_links()
1495 * rb_check_pages - integrity check of buffer pages
1515 * time when interrupts are disabled non-deterministic, dependent on the in rb_check_pages()
1516 * ring buffer size. Therefore, the code releases and re-acquires the in rb_check_pages()
1522 * giving up. This is acceptable because this is only a self-validation in rb_check_pages()
1531 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1532 head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1535 buffer_cnt = cpu_buffer->cnt; in rb_check_pages()
1537 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1540 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1542 if (buffer_cnt != cpu_buffer->cnt) { in rb_check_pages()
1544 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1548 tmp = rb_list_head(tmp->next); in rb_check_pages()
1556 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1560 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1564 * Take an address, add the meta data size as well as the array of
1565 * subbuffer indexes, then align it to a subbuffer size.
1582 int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_range_meta()
1583 unsigned long ptr = buffer->range_addr_start; in rb_range_meta()
1593 nr_subbufs = meta->nr_subbufs; in rb_range_meta()
1619 size = ptr - p; in rb_range_meta()
1620 ptr += size * (cpu - 2); in rb_range_meta()
1629 int subbuf_size = meta->subbuf_size; in rb_subbufs_from_meta()
1633 ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs); in rb_subbufs_from_meta()
1639 * Return a specific sub-buffer for a given @cpu defined by @idx.
1647 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1651 if (WARN_ON_ONCE(idx >= meta->nr_subbufs)) in rb_range_buffer()
1654 subbuf_size = meta->subbuf_size; in rb_range_buffer()
1656 /* Map this buffer to the order that's in meta->buffers[] */ in rb_range_buffer()
1657 idx = meta->buffers[idx]; in rb_range_buffer()
1662 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) in rb_range_buffer()
1688 if (meta->magic != RING_BUFFER_META_MAGIC || in rb_meta_valid()
1689 meta->struct_size != sizeof(*meta)) { in rb_meta_valid()
1695 if (meta->subbuf_size != subbuf_size || in rb_meta_valid()
1696 meta->nr_subbufs != nr_pages + 1) { in rb_meta_valid()
1701 buffers_start = meta->first_buffer; in rb_meta_valid()
1702 buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs); in rb_meta_valid()
1705 if (meta->head_buffer < buffers_start || in rb_meta_valid()
1706 meta->head_buffer >= buffers_end) { in rb_meta_valid()
1711 if (meta->commit_buffer < buffers_start || in rb_meta_valid()
1712 meta->commit_buffer >= buffers_end) { in rb_meta_valid()
1719 bitmap_clear(subbuf_mask, 0, meta->nr_subbufs); in rb_meta_valid()
1722 for (i = 0; i < meta->nr_subbufs; i++) { in rb_meta_valid()
1723 if (meta->buffers[i] < 0 || in rb_meta_valid()
1724 meta->buffers[i] >= meta->nr_subbufs) { in rb_meta_valid()
1725 pr_info("Ring buffer boot meta [%d] array out of range\n", cpu); in rb_meta_valid()
1729 if ((unsigned)local_read(&subbuf->commit) > subbuf_size) { in rb_meta_valid()
1734 if (test_bit(meta->buffers[i], subbuf_mask)) { in rb_meta_valid()
1735 pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu); in rb_meta_valid()
1739 set_bit(meta->buffers[i], subbuf_mask); in rb_meta_valid()
1759 ts = dpage->time_stamp; in rb_read_data_buffer()
1763 event = (struct ring_buffer_event *)(dpage->data + e); in rb_read_data_buffer()
1765 switch (event->type_len) { in rb_read_data_buffer()
1778 return -1; in rb_read_data_buffer()
1784 if (event->time_delta == 1) in rb_read_data_buffer()
1789 ts += event->time_delta; in rb_read_data_buffer()
1793 return -1; in rb_read_data_buffer()
1806 tail = local_read(&dpage->commit); in rb_validate_buffer()
1813 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_meta_validate_events()
1820 if (!meta || !meta->head_buffer) in rb_meta_validate_events()
1824 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1830 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); in rb_meta_validate_events()
1831 local_set(&cpu_buffer->reader_page->entries, ret); in rb_meta_validate_events()
1833 head_page = cpu_buffer->head_page; in rb_meta_validate_events()
1836 if (head_page == cpu_buffer->reader_page && in rb_meta_validate_events()
1837 head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
1841 for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) { in rb_meta_validate_events()
1844 if (head_page == cpu_buffer->reader_page) in rb_meta_validate_events()
1847 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1850 cpu_buffer->cpu); in rb_meta_validate_events()
1856 local_inc(&cpu_buffer->pages_touched); in rb_meta_validate_events()
1859 entry_bytes += local_read(&head_page->page->commit); in rb_meta_validate_events()
1860 local_set(&cpu_buffer->head_page->entries, ret); in rb_meta_validate_events()
1862 if (head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
1866 if (head_page != cpu_buffer->commit_page) { in rb_meta_validate_events()
1868 cpu_buffer->cpu); in rb_meta_validate_events()
1872 local_set(&cpu_buffer->entries, entries); in rb_meta_validate_events()
1873 local_set(&cpu_buffer->entries_bytes, entry_bytes); in rb_meta_validate_events()
1875 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
1880 meta->head_buffer = 0; in rb_meta_validate_events()
1881 meta->commit_buffer = 0; in rb_meta_validate_events()
1884 local_set(&cpu_buffer->reader_page->entries, 0); in rb_meta_validate_events()
1885 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_meta_validate_events()
1888 for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) { in rb_meta_validate_events()
1889 local_set(&head_page->entries, 0); in rb_meta_validate_events()
1890 local_set(&head_page->page->commit, 0); in rb_meta_validate_events()
1902 meta->text_addr = THIS_TEXT_PTR; in rb_meta_init_text_addr()
1903 meta->data_addr = THIS_DATA_PTR; in rb_meta_init_text_addr()
1915 /* Create a mask to test the subbuf array */ in rb_range_meta_init()
1927 delta = (unsigned long)subbuf - meta->first_buffer; in rb_range_meta_init()
1928 meta->first_buffer += delta; in rb_range_meta_init()
1929 meta->head_buffer += delta; in rb_range_meta_init()
1930 meta->commit_buffer += delta; in rb_range_meta_init()
1931 buffer->last_text_delta = THIS_TEXT_PTR - meta->text_addr; in rb_range_meta_init()
1932 buffer->last_data_delta = THIS_DATA_PTR - meta->data_addr; in rb_range_meta_init()
1936 if (cpu < nr_cpu_ids - 1) in rb_range_meta_init()
1939 next_meta = (void *)buffer->range_addr_end; in rb_range_meta_init()
1941 memset(meta, 0, next_meta - (void *)meta); in rb_range_meta_init()
1943 meta->magic = RING_BUFFER_META_MAGIC; in rb_range_meta_init()
1944 meta->struct_size = sizeof(*meta); in rb_range_meta_init()
1946 meta->nr_subbufs = nr_pages + 1; in rb_range_meta_init()
1947 meta->subbuf_size = PAGE_SIZE; in rb_range_meta_init()
1951 meta->first_buffer = (unsigned long)subbuf; in rb_range_meta_init()
1955 * The buffers[] array holds the order of the sub-buffers in rb_range_meta_init()
1956 * that are after the meta data. The sub-buffers may in rb_range_meta_init()
1959 * remain the same, the buffers[] array contains the in rb_range_meta_init()
1960 * index into the sub-buffers holding their actual order. in rb_range_meta_init()
1962 for (i = 0; i < meta->nr_subbufs; i++) { in rb_range_meta_init()
1963 meta->buffers[i] = i; in rb_range_meta_init()
1965 subbuf += meta->subbuf_size; in rb_range_meta_init()
1973 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start()
1974 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rbm_start()
1980 if (*pos > meta->nr_subbufs) in rbm_start()
1998 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show()
1999 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rbm_show()
2004 rb_meta_subbuf_idx(meta, (void *)meta->head_buffer)); in rbm_show()
2006 rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer)); in rbm_show()
2007 seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size); in rbm_show()
2008 seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs); in rbm_show()
2012 val -= 2; in rbm_show()
2013 seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]); in rbm_show()
2038 m = file->private_data; in ring_buffer_meta_seq_init()
2039 m->private = buffer->buffers[cpu]; in ring_buffer_meta_seq_init()
2048 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_meta_buffer_update()
2050 if (meta->head_buffer == (unsigned long)bpage->page) in rb_meta_buffer_update()
2051 cpu_buffer->head_page = bpage; in rb_meta_buffer_update()
2053 if (meta->commit_buffer == (unsigned long)bpage->page) { in rb_meta_buffer_update()
2054 cpu_buffer->commit_page = bpage; in rb_meta_buffer_update()
2055 cpu_buffer->tail_page = bpage; in rb_meta_buffer_update()
2062 struct trace_buffer *buffer = cpu_buffer->buffer; in __rb_allocate_pages()
2065 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
2078 return -ENOMEM; in __rb_allocate_pages()
2082 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
2099 if (buffer->range_addr_start) in __rb_allocate_pages()
2100 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2106 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
2116 list_add_tail(&bpage->list, pages); in __rb_allocate_pages()
2120 bpage->page = rb_range_buffer(cpu_buffer, i + 1); in __rb_allocate_pages()
2121 if (!bpage->page) in __rb_allocate_pages()
2124 if (meta->head_buffer) in __rb_allocate_pages()
2126 bpage->range = 1; in __rb_allocate_pages()
2127 bpage->id = i + 1; in __rb_allocate_pages()
2129 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), in __rb_allocate_pages()
2131 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
2134 bpage->page = page_address(page); in __rb_allocate_pages()
2135 rb_init_page(bpage->page); in __rb_allocate_pages()
2137 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2149 list_del_init(&bpage->list); in __rb_allocate_pages()
2155 return -ENOMEM; in __rb_allocate_pages()
2166 return -ENOMEM; in rb_allocate_pages()
2173 cpu_buffer->pages = pages.next; in rb_allocate_pages()
2176 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
2197 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
2198 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
2199 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
2200 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
2201 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
2202 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
2203 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
2204 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
2205 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
2206 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
2207 mutex_init(&cpu_buffer->mapping_lock); in rb_allocate_cpu_buffer()
2216 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
2218 if (buffer->range_addr_start) { in rb_allocate_cpu_buffer()
2223 cpu_buffer->mapped = 1; in rb_allocate_cpu_buffer()
2224 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2225 bpage->page = rb_range_buffer(cpu_buffer, 0); in rb_allocate_cpu_buffer()
2226 if (!bpage->page) in rb_allocate_cpu_buffer()
2228 if (cpu_buffer->ring_meta->head_buffer) in rb_allocate_cpu_buffer()
2230 bpage->range = 1; in rb_allocate_cpu_buffer()
2234 cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
2237 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
2238 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
2241 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
2242 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
2251 meta = cpu_buffer->ring_meta; in rb_allocate_cpu_buffer()
2252 if (!meta || !meta->head_buffer || in rb_allocate_cpu_buffer()
2253 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) { in rb_allocate_cpu_buffer()
2254 if (meta && meta->head_buffer && in rb_allocate_cpu_buffer()
2255 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) { in rb_allocate_cpu_buffer()
2257 if (!cpu_buffer->head_page) in rb_allocate_cpu_buffer()
2259 if (!cpu_buffer->commit_page) in rb_allocate_cpu_buffer()
2261 if (!cpu_buffer->tail_page) in rb_allocate_cpu_buffer()
2265 cpu_buffer->head_page in rb_allocate_cpu_buffer()
2266 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
2267 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
2271 if (cpu_buffer->ring_meta) in rb_allocate_cpu_buffer()
2272 meta->commit_buffer = meta->head_buffer; in rb_allocate_cpu_buffer()
2281 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
2290 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
2293 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
2295 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
2301 list_del_init(&bpage->list); in rb_free_cpu_buffer()
2308 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
2331 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in alloc_buffer()
2334 buffer->subbuf_order = order; in alloc_buffer()
2336 buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE; in alloc_buffer()
2338 /* Max payload is buffer page size - header (8 bytes) */ in alloc_buffer()
2339 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2); in alloc_buffer()
2341 buffer->flags = flags; in alloc_buffer()
2342 buffer->clock = trace_clock_local; in alloc_buffer()
2343 buffer->reader_lock_key = key; in alloc_buffer()
2345 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in alloc_buffer()
2346 init_waitqueue_head(&buffer->irq_work.waiters); in alloc_buffer()
2348 buffer->cpus = nr_cpu_ids; in alloc_buffer()
2351 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in alloc_buffer()
2353 if (!buffer->buffers) in alloc_buffer()
2361 size = end - start; in alloc_buffer()
2365 * The number of sub-buffers (nr_pages) is determined by the in alloc_buffer()
2368 * needed, plus account for the integer array index that in alloc_buffer()
2371 nr_pages = (size - sizeof(struct ring_buffer_meta)) / in alloc_buffer()
2388 nr_pages--; in alloc_buffer()
2393 nr_pages--; in alloc_buffer()
2394 buffer->range_addr_start = start; in alloc_buffer()
2395 buffer->range_addr_end = end; in alloc_buffer()
2401 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in alloc_buffer()
2407 cpumask_set_cpu(cpu, buffer->cpumask); in alloc_buffer()
2408 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in alloc_buffer()
2409 if (!buffer->buffers[cpu]) in alloc_buffer()
2412 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in alloc_buffer()
2416 mutex_init(&buffer->mutex); in alloc_buffer()
2422 if (buffer->buffers[cpu]) in alloc_buffer()
2423 rb_free_cpu_buffer(buffer->buffers[cpu]); in alloc_buffer()
2425 kfree(buffer->buffers); in alloc_buffer()
2428 free_cpumask_var(buffer->cpumask); in alloc_buffer()
2436 * __ring_buffer_alloc - allocate a new ring_buffer
2449 /* Default buffer page size - one system page */ in __ring_buffer_alloc()
2456 * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
2459 * @order: sub-buffer order
2478 * ring_buffer_last_boot_delta - return the delta offset from last boot
2491 if (!buffer->last_text_delta) in ring_buffer_last_boot_delta()
2494 *text = buffer->last_text_delta; in ring_buffer_last_boot_delta()
2495 *data = buffer->last_data_delta; in ring_buffer_last_boot_delta()
2501 * ring_buffer_free - free a ring buffer.
2509 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
2511 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
2514 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
2516 kfree(buffer->buffers); in ring_buffer_free()
2517 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
2526 buffer->clock = clock; in ring_buffer_set_clock()
2531 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
2536 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
2541 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
2546 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
2561 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2562 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
2572 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
2578 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
2579 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
2583 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
2587 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
2591 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
2593 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
2600 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
2603 next_page->prev = tail_page; in rb_remove_pages()
2606 cpu_buffer->pages = next_page; in rb_remove_pages()
2607 cpu_buffer->cnt++; in rb_remove_pages()
2611 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2615 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2616 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2618 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2640 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2641 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2642 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2650 nr_removed--; in rb_remove_pages()
2662 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2668 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2676 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
2678 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
2685 while (retries--) { in rb_insert_pages()
2693 head_page = &hpage->list; in rb_insert_pages()
2694 prev_page = head_page->prev; in rb_insert_pages()
2696 first_page = pages->next; in rb_insert_pages()
2697 last_page = pages->prev; in rb_insert_pages()
2702 last_page->next = head_page_with_bit; in rb_insert_pages()
2703 first_page->prev = prev_page; in rb_insert_pages()
2706 if (try_cmpxchg(&prev_page->next, in rb_insert_pages()
2713 head_page->prev = last_page; in rb_insert_pages()
2714 cpu_buffer->cnt++; in rb_insert_pages()
2727 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2732 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2734 list_del_init(&bpage->list); in rb_insert_pages()
2745 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2749 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2752 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2760 complete(&cpu_buffer->update_done); in update_pages_handler()
2764 * ring_buffer_resize - resize the ring buffer
2769 * Minimum size is 2 * buffer->subbuf_size.
2781 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2788 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2791 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in ring_buffer_resize()
2798 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2799 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2808 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2809 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2810 err = -EBUSY; in ring_buffer_resize()
2817 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2819 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2820 cpu_buffer->nr_pages; in ring_buffer_resize()
2824 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2830 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2831 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2832 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2834 err = -ENOMEM; in ring_buffer_resize()
2848 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2849 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2855 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2862 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2864 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2872 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2873 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2877 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2878 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2883 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2885 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2893 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2894 err = -EBUSY; in ring_buffer_resize()
2898 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2899 cpu_buffer->nr_pages; in ring_buffer_resize()
2901 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2902 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2903 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2904 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2905 err = -ENOMEM; in ring_buffer_resize()
2923 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2924 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2928 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2940 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2941 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2950 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2953 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2956 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2957 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2964 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2965 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2967 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2970 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2972 list_del_init(&bpage->list); in ring_buffer_resize()
2977 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2978 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2985 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2987 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2989 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2990 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2996 return bpage->page->data + index; in __rb_page_index()
3002 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
3003 cpu_buffer->reader_page->read); in rb_reader_event()
3010 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
3014 if (iter->head != iter->next_event) in rb_iter_head_event()
3015 return iter->event; in rb_iter_head_event()
3026 if (iter->head > commit - 8) in rb_iter_head_event()
3029 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
3038 if ((iter->head + length) > commit || length > iter->event_size) in rb_iter_head_event()
3042 memcpy(iter->event, event, length); in rb_iter_head_event()
3050 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
3054 iter->next_event = iter->head + length; in rb_iter_head_event()
3055 return iter->event; in rb_iter_head_event()
3058 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
3059 iter->head = 0; in rb_iter_head_event()
3060 iter->next_event = 0; in rb_iter_head_event()
3061 iter->missed_events = 1; in rb_iter_head_event()
3074 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
3082 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
3084 return addr - BUF_PAGE_HDR_SIZE; in rb_event_index()
3089 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
3097 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
3098 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
3100 rb_inc_page(&iter->head_page); in rb_inc_iter()
3102 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
3103 iter->head = 0; in rb_inc_iter()
3104 iter->next_event = 0; in rb_inc_iter()
3107 /* Return the index into the sub-buffers for a given sub-buffer */
3112 subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs; in rb_meta_subbuf_idx()
3113 subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size); in rb_meta_subbuf_idx()
3114 return (subbuf - subbuf_array) / meta->subbuf_size; in rb_meta_subbuf_idx()
3120 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_head()
3121 unsigned long old_head = (unsigned long)next_page->page; in rb_update_meta_head()
3125 new_head = (unsigned long)next_page->page; in rb_update_meta_head()
3131 (void)cmpxchg(&meta->head_buffer, old_head, new_head); in rb_update_meta_head()
3137 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_reader()
3138 void *old_reader = cpu_buffer->reader_page->page; in rb_update_meta_reader()
3139 void *new_reader = reader->page; in rb_update_meta_reader()
3142 id = reader->id; in rb_update_meta_reader()
3143 cpu_buffer->reader_page->id = id; in rb_update_meta_reader()
3144 reader->id = 0; in rb_update_meta_reader()
3146 meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader); in rb_update_meta_reader()
3147 meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader); in rb_update_meta_reader()
3154 * rb_handle_head_page - writer hit the head page
3158 * -1 on error
3182 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
3183 * HEAD - we are the first to get here. in rb_handle_head_page()
3184 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
3186 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
3198 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
3199 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
3200 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
3202 if (cpu_buffer->ring_meta) in rb_handle_head_page()
3234 return -1; in rb_handle_head_page()
3259 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
3260 * NORMAL - One of two things: in rb_handle_head_page()
3272 return -1; in rb_handle_head_page()
3288 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
3311 return -1; in rb_handle_head_page()
3321 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
3322 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
3324 unsigned long length = info->length; in rb_reset_tail()
3337 tail_page->real_end = 0; in rb_reset_tail()
3339 local_sub(length, &tail_page->write); in rb_reset_tail()
3350 tail_page->real_end = tail; in rb_reset_tail()
3364 if (tail > (bsize - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
3374 local_sub(length, &tail_page->write); in rb_reset_tail()
3379 event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
3380 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
3382 event->time_delta = 1; in rb_reset_tail()
3385 local_add(bsize - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
3387 /* Make sure the padding is visible before the tail_page->write update */ in rb_reset_tail()
3391 length = (tail + length) - bsize; in rb_reset_tail()
3392 local_sub(length, &tail_page->write); in rb_reset_tail()
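Lines 3364-3392 show the tail of a sub-buffer being closed off with a padding event whose array[0] holds the leftover payload size; a small sketch of that size bookkeeping, where RB_EVNT_HDR_SIZE of 4 and RB_EVNT_MIN_SIZE of 8 are assumptions:

    /* Sketch: when a write would cross the end of a sub-buffer, the
     * remaining space is consumed by one padding event. */
    #include <stdio.h>

    #define RB_EVNT_HDR_SIZE 4U   /* assumed */
    #define RB_EVNT_MIN_SIZE 8U   /* assumed */

    static unsigned padding_payload(unsigned bsize, unsigned tail)
    {
        /* Too little room even for a padding event: nothing to record. */
        if (tail > bsize - RB_EVNT_MIN_SIZE)
            return 0;
        return (bsize - tail) - RB_EVNT_HDR_SIZE;
    }

    int main(void)
    {
        /* 4096-byte sub-buffer with 4000 bytes already written. */
        printf("padding payload: %u bytes\n", padding_payload(4096, 4000));
        return 0;
    }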
3404 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
3405 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
3406 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
3420 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3438 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
3444 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
3449 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
3450 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
3472 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
3473 cpu_buffer->tail_page) && in rb_move_tail()
3474 (cpu_buffer->commit_page == in rb_move_tail()
3475 cpu_buffer->reader_page))) { in rb_move_tail()
3476 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3491 local_inc(&cpu_buffer->committing); in rb_move_tail()
3494 return ERR_PTR(-EAGAIN); in rb_move_tail()
3509 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
3511 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
3515 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
3516 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
3519 event->time_delta = 0; in rb_add_time_stamp()
3520 event->array[0] = 0; in rb_add_time_stamp()
3540 (unsigned long long)info->delta, in rb_check_timestamp()
3541 (unsigned long long)info->ts, in rb_check_timestamp()
3542 (unsigned long long)info->before, in rb_check_timestamp()
3543 (unsigned long long)info->after, in rb_check_timestamp()
3544 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), in rb_check_timestamp()
3558 bool abs = info->add_timestamp & in rb_add_timestamp()
3561 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
3566 if (abs && (info->ts & TS_MSB)) { in rb_add_timestamp()
3567 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
3570 } else if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
3580 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
3581 info->before, info->ts); in rb_add_timestamp()
3586 info->delta = 0; in rb_add_timestamp()
3588 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); in rb_add_timestamp()
3589 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
3594 * rb_update_event - update event type and data
3609 unsigned length = info->length; in rb_update_event()
3610 u64 delta = info->delta; in rb_update_event()
3611 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event() local
3613 if (!WARN_ON_ONCE(nest >= MAX_NEST)) in rb_update_event()
3614 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
3620 if (unlikely(info->add_timestamp)) in rb_update_event()
3623 event->time_delta = delta; in rb_update_event()
3624 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
3626 event->type_len = 0; in rb_update_event()
3627 event->array[0] = length; in rb_update_event()
3629 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
3634 struct ring_buffer_event event; /* Used only for sizeof array */ in rb_calculate_event_length()
3641 length += sizeof(event.array[0]); in rb_calculate_event_length()
3675 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3677 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3683 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
3685 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
3699 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3719 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { in rb_try_to_discard()
3721 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3732 local_inc(&cpu_buffer->committing); in rb_start_commit()
3733 local_inc(&cpu_buffer->commits); in rb_start_commit()
3750 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3752 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3753 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3756 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3762 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3763 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3764 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3765 if (cpu_buffer->ring_meta) { in rb_set_commit_to_write()
3766 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_set_commit_to_write()
3767 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page; in rb_set_commit_to_write()
3773 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3777 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3778 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3780 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3793 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3802 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3806 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3809 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3812 local_dec(&cpu_buffer->committing); in rb_end_commit()
3822 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3823 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3824 local_inc(&cpu_buffer->committing); in rb_end_commit()
3834 /* array[0] holds the actual length for the discarded event */ in rb_event_discard()
3835 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
3836 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
3838 if (!event->time_delta) in rb_event_discard()
3839 event->time_delta = 1; in rb_event_discard()
3844 local_inc(&cpu_buffer->entries); in rb_commit()
3851 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3852 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3854 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3857 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3858 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3860 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3863 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3866 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3869 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3872 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3874 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3877 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3878 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3880 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3917 * 101 - 1 = 100
3920 * 1010 - 1 = 1001
3955 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3958 bit = RB_CTX_NORMAL - bit; in trace_recursive_lock()
3960 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3967 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3973 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3974 cpu_buffer->current_context = val; in trace_recursive_lock()
3982 cpu_buffer->current_context &= in trace_recursive_unlock()
3983 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
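The arithmetic comments at 3917 and 3920 and the unlock at 3982-3983 describe clearing the most recently set context bit by subtraction; a worked sketch of the nest == 0 case, where the bit patterns are the ones from the comment rather than the kernel's enum values:

    /* Sketch: val & (val - 1) clears the lowest set bit, matching the
     * 101 -> 100 and 1010 -> 1000 examples in the source comment. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x5;     /* 101: two contexts active */

        printf("before: %x\n", val);
        val &= val - 1;             /* 101 - 1 = 100, 101 & 100 = 100 */
        printf("after:  %x\n", val);

        val = 0xa;                  /* 1010 */
        val &= val - 1;             /* 1010 - 1 = 1001, result 1000 */
        printf("again:  %x\n", val);
        return 0;
    }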
3990 * ring_buffer_nest_start - Allow tracing while nested
3996 * will allow this function to nest within a currently active
4010 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
4012 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
4016 * ring_buffer_nest_end - Allow tracing while nested
4029 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
4031 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
4036 * ring_buffer_unlock_commit - commit a reserved event
4048 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
4089 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) in show_flags()
4094 if (entry->flags & TRACE_FLAG_SOFTIRQ) in show_flags()
4097 if (entry->flags & TRACE_FLAG_HARDIRQ) in show_flags()
4100 if (entry->flags & TRACE_FLAG_NMI) in show_flags()
4110 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) in show_irq()
4114 if (entry->flags & TRACE_FLAG_IRQS_OFF) in show_irq()
4144 ts = bpage->time_stamp; in dump_buffer_page()
4149 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
4151 switch (event->type_len) { in dump_buffer_page()
4168 ts += event->time_delta; in dump_buffer_page()
4170 e, ts, event->time_delta); in dump_buffer_page()
4174 ts += event->time_delta; in dump_buffer_page()
4176 e, ts, event->time_delta, in dump_buffer_page()
4197 atomic_inc(&cpu_buffer->record_disabled); \
4203 /* Do not re-enable checking */ \
4220 bpage = info->tail_page->page; in check_buffer()
4224 tail = local_read(&bpage->commit); in check_buffer()
4225 } else if (info->add_timestamp & in check_buffer()
4235 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
4244 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4248 cpu_buffer->cpu, ts, delta); in check_buffer()
4252 if ((full && ts > info->ts) || in check_buffer()
4253 (!full && ts + info->delta != info->ts)) { in check_buffer()
4255 cpu_buffer->cpu, in check_buffer()
4256 ts + info->delta, info->ts, info->delta, in check_buffer()
4257 info->before, info->after, in check_buffer()
4279 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
4280 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
4282 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
4284 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4285 rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4287 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4289 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
4290 info->delta = info->ts; in __rb_reserve_next()
4298 /* Use the sub-buffer timestamp */ in __rb_reserve_next()
4299 info->delta = 0; in __rb_reserve_next()
4300 } else if (unlikely(info->before != info->after)) { in __rb_reserve_next()
4301 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
4302 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
4304 info->delta = info->ts - info->after; in __rb_reserve_next()
4305 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
4306 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
4307 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
4312 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
4314 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
4319 tail = write - info->length; in __rb_reserve_next()
4322 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
4329 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
4336 if (likely(!(info->add_timestamp & in __rb_reserve_next()
4339 info->delta = info->ts - info->after; in __rb_reserve_next()
4342 info->delta = info->ts; in __rb_reserve_next()
4346 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
4349 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4357 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4358 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
4361 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4363 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
4364 info->after == info->before && info->after < ts) { in __rb_reserve_next()
4367 * safe to use info->after for the delta as it in __rb_reserve_next()
4368 * matched info->before and is still valid. in __rb_reserve_next()
4370 info->delta = ts - info->after; in __rb_reserve_next()
4380 info->delta = 0; in __rb_reserve_next()
4382 info->ts = ts; in __rb_reserve_next()
4383 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
4390 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
4392 info->delta = 0; in __rb_reserve_next()
4399 local_inc(&tail_page->entries); in __rb_reserve_next()
4406 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
4409 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
4446 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
4447 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
4448 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
4455 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
4458 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
4482 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
4484 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
4496 * ring_buffer_lock_reserve - reserve a part of the buffer
4520 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
4525 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
4528 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4530 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
4533 if (unlikely(length > buffer->max_data_size)) in ring_buffer_lock_reserve()
4564 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
4567 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
4570 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
4571 local_dec(&bpage->entries); in rb_decrement_entry()
4582 if (bpage->page == (void *)addr) { in rb_decrement_entry()
4583 local_dec(&bpage->entries); in rb_decrement_entry()
4594 * ring_buffer_discard_commit - discard an event that has not been committed
4622 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4629 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
4646 * ring_buffer_write - write data to the buffer without reserving
4665 int ret = -EBUSY; in ring_buffer_write()
4670 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
4675 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
4678 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
4680 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
4683 if (length > buffer->max_data_size) in ring_buffer_write()
4722 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4723 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4732 * ring_buffer_record_disable - stop all writes into the buffer
4742 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4747 * ring_buffer_record_enable - enable writes to the buffer
4755 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4760 * ring_buffer_record_off - stop all writes into the buffer
4775 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4778 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
4783 * ring_buffer_record_on - restart writes into the buffer
4798 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4801 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
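ring_buffer_record_off()/on() at 4775-4801 toggle a flag bit inside the same atomic word that the disable/enable calls increment; a userspace model of that read-modify-cmpxchg loop, where the flag's bit position is an assumption:

    /* Sketch: set or clear a permanent "off" flag bit in an atomic word
     * without disturbing the nested disable count stored alongside it. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define RB_BUFFER_OFF (1u << 20)   /* assumed flag position */

    static void record_off(atomic_uint *record_disabled)
    {
        unsigned int rd = atomic_load(record_disabled);
        unsigned int new_rd;

        do {
            new_rd = rd | RB_BUFFER_OFF;
        } while (!atomic_compare_exchange_weak(record_disabled, &rd, new_rd));
    }

    static void record_on(atomic_uint *record_disabled)
    {
        unsigned int rd = atomic_load(record_disabled);
        unsigned int new_rd;

        do {
            new_rd = rd & ~RB_BUFFER_OFF;
        } while (!atomic_compare_exchange_weak(record_disabled, &rd, new_rd));
    }

    int main(void)
    {
        atomic_uint disabled = 2;   /* two nested disables outstanding */

        record_off(&disabled);
        printf("off: %#x\n", atomic_load(&disabled));
        record_on(&disabled);
        printf("on:  %#x\n", atomic_load(&disabled));
        return 0;
    }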
4806 * ring_buffer_record_is_on - return true if the ring buffer can write
4813 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4817 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4829 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
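/*
 * Editor's sketch (not part of ring_buffer.c): record_off()/record_on() flip
 * the RB_BUFFER_OFF bit manipulated by the cmpxchg loops above instead of
 * bumping the disable counter, so they behave as a single on/off switch that
 * does not nest. record_is_set_on() reports that switch; record_is_on() also
 * reflects the counter used by record_disable().
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void my_set_recording(struct trace_buffer *buffer, bool on)
{
	if (on)
		ring_buffer_record_on(buffer);
	else
		ring_buffer_record_off(buffer);

	pr_info("ring buffer switch is now %s\n",
		ring_buffer_record_is_set_on(buffer) ? "on" : "off");
}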
4833 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4846 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4849 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4850 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4855 * ring_buffer_record_enable_cpu - enable writes to the buffer
4866 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4869 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4870 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4875 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4886 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4889 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4890 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4895 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4896 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4900 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4901 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4908 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4917 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4920 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4921 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4928 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4936 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4939 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4946 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4956 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4959 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4960 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4967 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4979 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4982 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4983 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4990 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
5001 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
5004 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
5005 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
5012 * ring_buffer_read_events_cpu - get the number of events successfully read
5021 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
5024 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
5025 return cpu_buffer->read; in ring_buffer_read_events_cpu()
5030 * ring_buffer_entries - get the number of entries in a buffer
5044 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5053 * ring_buffer_overruns - get the number of overruns in buffer
5067 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
5068 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
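/*
 * Editor's sketch (not part of ring_buffer.c): pulling the per-CPU and
 * buffer-wide statistics helpers above into one diagnostic dump. The
 * per-CPU accessors are assumed to take (buffer, cpu) and return
 * unsigned long, except ring_buffer_oldest_event_ts() which returns a u64
 * timestamp.
 */
#include <linux/ring_buffer.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void my_dump_stats(struct trace_buffer *buffer)
{
	int cpu;

	pr_info("total: entries=%lu overruns=%lu\n",
		ring_buffer_entries(buffer), ring_buffer_overruns(buffer));

	for_each_online_cpu(cpu) {
		pr_info("cpu%d: entries=%lu bytes=%lu overrun=%lu commit_overrun=%lu dropped=%lu read=%lu oldest_ts=%llu\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_bytes_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_commit_overrun_cpu(buffer, cpu),
			ring_buffer_dropped_events_cpu(buffer, cpu),
			ring_buffer_read_events_cpu(buffer, cpu),
			ring_buffer_oldest_event_ts(buffer, cpu));
	}
}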
5077 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
5080 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
5081 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
5082 iter->next_event = iter->head; in rb_iter_reset()
5084 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
5085 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
5086 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
5088 if (iter->head) { in rb_iter_reset()
5089 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
5090 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
5092 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
5093 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
5098 * ring_buffer_iter_reset - reset an iterator
5112 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
5114 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5116 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5121 * ring_buffer_iter_empty - check if an iterator has no more to read
5135 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
5136 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
5137 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
5138 commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5139 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
5152 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5153 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
5161 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
5162 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
5163 head_page->read == commit && in ring_buffer_iter_empty()
5164 iter->head == rb_page_size(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
5174 switch (event->type_len) { in rb_update_read_stamp()
5180 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
5185 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
5186 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
5190 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
5204 switch (event->type_len) { in rb_update_iter_read_stamp()
5210 iter->read_stamp += delta; in rb_update_iter_read_stamp()
5215 delta = rb_fix_abs_ts(delta, iter->read_stamp); in rb_update_iter_read_stamp()
5216 iter->read_stamp = delta; in rb_update_iter_read_stamp()
5220 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
5224 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
5232 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
5239 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
5253 reader = cpu_buffer->reader_page; in rb_get_reader_page()
5256 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
5261 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
5266 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
5276 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
5277 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
5278 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
5279 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
5288 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
5289 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
5292 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
5296 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
5299 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
5311 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
5324 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
5332 if (cpu_buffer->ring_meta) in rb_get_reader_page()
5340 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
5341 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
5343 cpu_buffer->cnt++; in rb_get_reader_page()
5344 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
5347 cpu_buffer->reader_page = reader; in rb_get_reader_page()
5348 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
5350 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
5351 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
5352 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
5359 if (reader && reader->read == 0) in rb_get_reader_page()
5360 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
5362 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
5414 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
5415 cpu_buffer->read++; in rb_advance_reader()
5420 cpu_buffer->reader_page->read += length; in rb_advance_reader()
5421 cpu_buffer->read_bytes += length; in rb_advance_reader()
5428 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
5431 if (iter->head == iter->next_event) { in rb_advance_iter()
5437 iter->head = iter->next_event; in rb_advance_iter()
5442 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
5444 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
5450 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
5455 return cpu_buffer->lost_events; in rb_lost_events()
5484 switch (event->type_len) { in rb_buffer_peek()
5506 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); in rb_buffer_peek()
5507 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5508 cpu_buffer->cpu, ts); in rb_buffer_peek()
5516 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
5517 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5518 cpu_buffer->cpu, ts); in rb_buffer_peek()
5543 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
5544 buffer = cpu_buffer->buffer; in rb_iter_peek()
5551 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
5552 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
5553 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
5573 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
5582 switch (event->type_len) { in rb_iter_peek()
5599 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); in rb_iter_peek()
5600 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
5601 cpu_buffer->cpu, ts); in rb_iter_peek()
5609 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
5611 cpu_buffer->cpu, ts); in rb_iter_peek()
5626 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
5639 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
5643 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
5651 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
5655 * ring_buffer_peek - peek at the next event to be read
5668 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5673 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5680 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
5685 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
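/*
 * Editor's sketch (not part of ring_buffer.c): peeking does not consume, so
 * the same event is returned until something reads it. Assumes the signature
 * used above - per CPU, with the timestamp and lost-event count handed back
 * through pointers, NULL when the CPU buffer is empty.
 */
#include <linux/ring_buffer.h>

static bool my_event_pending(struct trace_buffer *buffer, int cpu, u64 *ts)
{
	unsigned long lost = 0;

	return ring_buffer_peek(buffer, cpu, ts, &lost) != NULL;
}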
5691 /** ring_buffer_iter_dropped - report if there are dropped events
5698 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
5700 iter->missed_events = 0; in ring_buffer_iter_dropped()
5706 * ring_buffer_iter_peek - peek at the next event to be read
5716 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5721 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5723 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5725 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
5732 * ring_buffer_consume - return an event and consume it
5755 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5758 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5764 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5774 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
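/*
 * Editor's sketch (not part of ring_buffer.c): a consuming drain of one
 * CPU's buffer. Each event returned by ring_buffer_consume() is gone from
 * the buffer afterwards; the loop ends when it returns NULL. The lost-event
 * count reported alongside comes from cpu_buffer->lost_events as set up in
 * rb_get_reader_page() above.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void my_drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost_events;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
		if (lost_events)
			pr_warn("cpu%d: %lu events were overwritten unread\n",
				cpu, lost_events);
		pr_info("cpu%d: ts=%llu, %u byte payload at %p\n", cpu, ts,
			ring_buffer_event_length(event),
			ring_buffer_event_data(event));
	}
}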
5782 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5804 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5812 iter->event_size = buffer->subbuf_size; in ring_buffer_read_prepare()
5813 iter->event = kmalloc(iter->event_size, flags); in ring_buffer_read_prepare()
5814 if (!iter->event) { in ring_buffer_read_prepare()
5819 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5821 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5823 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5830 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5844 * ring_buffer_read_start - start a non consuming read of the buffer
5863 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5865 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5866 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5868 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5869 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5874 * ring_buffer_read_finish - finish reading the iterator of the buffer
5877 * This re-enables resizing of the buffer, and frees the iterator.
5882 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5887 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5888 kfree(iter->event); in ring_buffer_read_finish()
5894 * ring_buffer_iter_advance - advance the iterator to the next location
5902 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5905 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5909 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
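/*
 * Editor's sketch (not part of ring_buffer.c): the non-consuming read
 * sequence assembled from the iterator calls above - prepare, synchronize,
 * start, walk with iter_peek()/iter_advance(), then finish so resizing is
 * re-enabled and the iterator freed. Signatures assumed from
 * include/linux/ring_buffer.h; error handling kept minimal.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int my_walk_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		pr_info("cpu%d: ts=%llu len=%u\n", cpu, ts,
			ring_buffer_event_length(event));
		ring_buffer_iter_advance(iter);
	}

	ring_buffer_read_finish(iter);
	return 0;
}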
5914 * ring_buffer_size - return the size of the ring buffer (in bytes)
5920 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5923 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5928 * ring_buffer_max_event_size - return the max data size of an event
5937 return buffer->max_data_size - RB_LEN_TIME_EXTEND; in ring_buffer_max_event_size()
5938 return buffer->max_data_size; in ring_buffer_max_event_size()
5944 local_set(&page->write, 0); in rb_clear_buffer_page()
5945 local_set(&page->entries, 0); in rb_clear_buffer_page()
5946 rb_init_page(page->page); in rb_clear_buffer_page()
5947 page->read = 0; in rb_clear_buffer_page()
5952 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_update_meta_page()
5957 meta->reader.read = cpu_buffer->reader_page->read; in rb_update_meta_page()
5958 meta->reader.id = cpu_buffer->reader_page->id; in rb_update_meta_page()
5959 meta->reader.lost_events = cpu_buffer->lost_events; in rb_update_meta_page()
5961 meta->entries = local_read(&cpu_buffer->entries); in rb_update_meta_page()
5962 meta->overrun = local_read(&cpu_buffer->overrun); in rb_update_meta_page()
5963 meta->read = cpu_buffer->read; in rb_update_meta_page()
5965 /* Some archs do not have data cache coherency between kernel and user-space */ in rb_update_meta_page()
5966 flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page)); in rb_update_meta_page()
5976 cpu_buffer->head_page in rb_reset_cpu()
5977 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5978 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5979 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5983 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5984 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5986 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5987 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5988 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5990 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5991 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5992 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5993 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5994 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5995 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5996 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5997 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5998 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5999 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
6000 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
6001 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
6002 cpu_buffer->read = 0; in rb_reset_cpu()
6003 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
6005 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
6006 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
6008 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
6010 cpu_buffer->lost_events = 0; in rb_reset_cpu()
6011 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
6014 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
6016 if (cpu_buffer->mapped) { in rb_reset_cpu()
6018 if (cpu_buffer->ring_meta) { in rb_reset_cpu()
6019 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_reset_cpu()
6020 meta->commit_buffer = meta->head_buffer; in rb_reset_cpu()
6030 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
6032 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
6035 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6039 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6042 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
6046 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
6052 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
6055 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
6059 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
6061 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6062 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6069 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6070 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6073 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu); in ring_buffer_reset_cpu()
6077 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
6085 * ring_buffer_reset_online_cpus - reset all online per-CPU buffers of a ring buffer
6095 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
6098 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6100 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6101 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6108 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6114 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
6120 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu); in ring_buffer_reset_online_cpus()
6124 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6125 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6128 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
6132 * ring_buffer_reset - reset a ring buffer
6141 mutex_lock(&buffer->mutex); in ring_buffer_reset()
6144 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6146 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6147 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
6154 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6158 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
6159 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6162 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
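/*
 * Editor's sketch (not part of ring_buffer.c): resetting discards all unread
 * data. ring_buffer_reset_cpu() clears a single CPU's buffer (the CPU must be
 * in the buffer's cpumask), ring_buffer_reset() walks every possible CPU;
 * both block writers via the record_disabled/resize_disabled counters
 * incremented above.
 */
#include <linux/ring_buffer.h>

static void my_clear(struct trace_buffer *buffer, int cpu)
{
	if (cpu < 0)
		ring_buffer_reset(buffer);		/* all CPUs */
	else
		ring_buffer_reset_cpu(buffer, cpu);	/* just this CPU */
}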
6167 * ring_buffer_empty - is the ring buffer empty?
6180 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6196 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
6207 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
6210 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6223 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
6238 int ret = -EINVAL; in ring_buffer_swap_cpu()
6240 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
6241 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
6244 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
6245 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
6248 if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) { in ring_buffer_swap_cpu()
6249 ret = -EBUSY; in ring_buffer_swap_cpu()
6254 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
6257 if (buffer_a->subbuf_order != buffer_b->subbuf_order) in ring_buffer_swap_cpu()
6260 ret = -EAGAIN; in ring_buffer_swap_cpu()
6262 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
6265 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
6268 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
6271 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
6280 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
6281 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
6283 ret = -EBUSY; in ring_buffer_swap_cpu()
6284 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
6286 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
6293 if (atomic_read(&buffer_a->resizing)) in ring_buffer_swap_cpu()
6295 if (atomic_read(&buffer_b->resizing)) in ring_buffer_swap_cpu()
6298 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
6299 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
6301 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
6302 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
6307 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
6308 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
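/*
 * Editor's sketch (not part of ring_buffer.c): swapping one CPU's buffer
 * between two trace_buffers, much like the tracer's per-CPU snapshot path.
 * The call is only built with CONFIG_RING_BUFFER_ALLOW_SWAP and can fail
 * with -EINVAL/-EAGAIN/-EBUSY per the checks above, so the result must be
 * checked.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static int my_snapshot_cpu(struct trace_buffer *live, struct trace_buffer *snap,
			   int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(snap, live, cpu);
	if (ret)
		pr_warn("cpu%d: snapshot swap failed: %d\n", cpu, ret);

	return ret;
}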
6316 * ring_buffer_alloc_read_page - allocate a page to read from buffer
6339 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
6340 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
6344 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
6346 bpage->order = buffer->subbuf_order; in ring_buffer_alloc_read_page()
6347 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6349 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6351 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
6352 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
6353 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
6356 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6359 if (bpage->data) in ring_buffer_alloc_read_page()
6364 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6367 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
6370 bpage->data = page_address(page); in ring_buffer_alloc_read_page()
6373 rb_init_page(bpage->data); in ring_buffer_alloc_read_page()
6380 * ring_buffer_free_read_page - free an allocated read page
6391 struct buffer_data_page *bpage = data_page->data; in ring_buffer_free_read_page()
6395 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
6398 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6402 * is different from the subbuffer order of the buffer - in ring_buffer_free_read_page()
6405 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order) in ring_buffer_free_read_page()
6409 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6411 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
6412 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
6416 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6420 free_pages((unsigned long)bpage, data_page->order); in ring_buffer_free_read_page()
6426 * ring_buffer_read_page - extract a page from the ring buffer
6463 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6472 int ret = -1; in ring_buffer_read_page()
6474 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
6484 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
6486 if (!data_page || !data_page->data) in ring_buffer_read_page()
6488 if (data_page->order != buffer->subbuf_order) in ring_buffer_read_page()
6491 bpage = data_page->data; in ring_buffer_read_page()
6495 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
6503 read = reader->read; in ring_buffer_read_page()
6507 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
6516 if (read || (len < (commit - read)) || in ring_buffer_read_page()
6517 cpu_buffer->reader_page == cpu_buffer->commit_page || in ring_buffer_read_page()
6518 cpu_buffer->mapped) { in ring_buffer_read_page()
6519 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
6531 (!read || (len < (commit - read)) || in ring_buffer_read_page()
6532 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
6535 if (len > (commit - read)) in ring_buffer_read_page()
6536 len = (commit - read); in ring_buffer_read_page()
6545 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
6556 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
6558 len -= size; in ring_buffer_read_page()
6561 rpos = reader->read; in ring_buffer_read_page()
6573 local_set(&bpage->commit, pos); in ring_buffer_read_page()
6574 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
6580 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
6581 cpu_buffer->read_bytes += rb_page_size(reader); in ring_buffer_read_page()
6585 bpage = reader->page; in ring_buffer_read_page()
6586 reader->page = data_page->data; in ring_buffer_read_page()
6587 local_set(&reader->write, 0); in ring_buffer_read_page()
6588 local_set(&reader->entries, 0); in ring_buffer_read_page()
6589 reader->read = 0; in ring_buffer_read_page()
6590 data_page->data = bpage; in ring_buffer_read_page()
6597 if (reader->real_end) in ring_buffer_read_page()
6598 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
6602 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
6604 commit = local_read(&bpage->commit); in ring_buffer_read_page()
6612 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
6613 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
6615 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
6618 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
6624 if (commit < buffer->subbuf_size) in ring_buffer_read_page()
6625 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); in ring_buffer_read_page()
6628 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
6636 * ring_buffer_read_page_data - get pointer to the data in the page.
6643 return page->data; in ring_buffer_read_page_data()
6648 * ring_buffer_subbuf_size_get - get size of the sub buffer.
6655 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_size_get()
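/*
 * Editor's sketch (not part of ring_buffer.c): the page-extraction flow used
 * by splice-style readers - allocate (or reuse) a spare data page, swap it
 * for a full sub-buffer with ring_buffer_read_page(), inspect the data via
 * ring_buffer_read_page_data(), then return the page. Types and return
 * conventions (ERR_PTR on allocation failure, negative return when nothing
 * was transferred) are assumed from the functions above.
 */
#include <linux/ring_buffer.h>
#include <linux/err.h>
#include <linux/printk.h>

static void my_read_one_subbuf(struct trace_buffer *buffer, int cpu)
{
	struct buffer_data_read_page *dpage;
	size_t len = ring_buffer_subbuf_size_get(buffer);
	int ret;

	dpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(dpage))
		return;

	/* full == 1: only swap out a completely filled sub-buffer. */
	ret = ring_buffer_read_page(buffer, dpage, len, cpu, 1);
	if (ret >= 0)
		pr_info("cpu%d: got sub-buffer data at %p (read offset %d)\n",
			cpu, ring_buffer_read_page_data(dpage), ret);

	ring_buffer_free_read_page(buffer, cpu, dpage);
}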
6660 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
6674 return -EINVAL; in ring_buffer_subbuf_order_get()
6676 return buffer->subbuf_order; in ring_buffer_subbuf_order_get()
6681 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
6689 * 0 - 1 system page
6690 * 1 - 2 system pages
6691 * 2 - 4 system pages
6707 return -EINVAL; in ring_buffer_subbuf_order_set()
6709 if (buffer->subbuf_order == order) in ring_buffer_subbuf_order_set()
6714 return -EINVAL; in ring_buffer_subbuf_order_set()
6718 return -EINVAL; in ring_buffer_subbuf_order_set()
6720 old_order = buffer->subbuf_order; in ring_buffer_subbuf_order_set()
6721 old_size = buffer->subbuf_size; in ring_buffer_subbuf_order_set()
6724 mutex_lock(&buffer->mutex); in ring_buffer_subbuf_order_set()
6725 atomic_inc(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6730 buffer->subbuf_order = order; in ring_buffer_subbuf_order_set()
6731 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_order_set()
6736 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6739 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6741 if (cpu_buffer->mapped) { in ring_buffer_subbuf_order_set()
6742 err = -EBUSY; in ring_buffer_subbuf_order_set()
6747 nr_pages = old_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_subbuf_order_set()
6748 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size); in ring_buffer_subbuf_order_set()
6754 cpu_buffer->nr_pages_to_update = nr_pages; in ring_buffer_subbuf_order_set()
6760 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6762 &cpu_buffer->new_pages)) { in ring_buffer_subbuf_order_set()
6764 err = -ENOMEM; in ring_buffer_subbuf_order_set()
6774 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6777 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6779 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6791 list_add(&old_pages, cpu_buffer->pages); in ring_buffer_subbuf_order_set()
6792 list_add(&cpu_buffer->reader_page->list, &old_pages); in ring_buffer_subbuf_order_set()
6795 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, in ring_buffer_subbuf_order_set()
6797 list_del_init(&cpu_buffer->reader_page->list); in ring_buffer_subbuf_order_set()
6800 cpu_buffer->pages = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
6801 list_del_init(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6802 cpu_buffer->cnt++; in ring_buffer_subbuf_order_set()
6804 cpu_buffer->head_page in ring_buffer_subbuf_order_set()
6805 = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
6806 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in ring_buffer_subbuf_order_set()
6808 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; in ring_buffer_subbuf_order_set()
6809 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_subbuf_order_set()
6811 old_free_data_page = cpu_buffer->free_page; in ring_buffer_subbuf_order_set()
6812 cpu_buffer->free_page = NULL; in ring_buffer_subbuf_order_set()
6816 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6820 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
6828 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6829 mutex_unlock(&buffer->mutex); in ring_buffer_subbuf_order_set()
6834 buffer->subbuf_order = old_order; in ring_buffer_subbuf_order_set()
6835 buffer->subbuf_size = old_size; in ring_buffer_subbuf_order_set()
6837 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6838 mutex_unlock(&buffer->mutex); in ring_buffer_subbuf_order_set()
6841 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6843 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_subbuf_order_set()
6846 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
6847 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
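/*
 * Editor's sketch (not part of ring_buffer.c): switching to 4-system-page
 * sub-buffers (order 2) and back. ring_buffer_subbuf_order_set() rebuilds
 * every per-CPU page list and refuses user-mapped buffers (-EBUSY above) or
 * orders outside the supported range, so both calls need their return value
 * checked.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>

static int my_try_large_subbufs(struct trace_buffer *buffer)
{
	int old_order = ring_buffer_subbuf_order_get(buffer);
	int ret;

	ret = ring_buffer_subbuf_order_set(buffer, 2);	/* 2^2 = 4 system pages */
	if (ret)
		return ret;

	pr_info("sub-buffer size (header included) is now %d bytes\n",
		ring_buffer_subbuf_size_get(buffer));

	/* Restore the previous order. */
	return ring_buffer_subbuf_order_set(buffer, old_order);
}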
6860 if (cpu_buffer->meta_page) in rb_alloc_meta_page()
6865 return -ENOMEM; in rb_alloc_meta_page()
6867 cpu_buffer->meta_page = page_to_virt(page); in rb_alloc_meta_page()
6874 unsigned long addr = (unsigned long)cpu_buffer->meta_page; in rb_free_meta_page()
6877 cpu_buffer->meta_page = NULL; in rb_free_meta_page()
6883 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_setup_ids_meta_page()
6884 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1; in rb_setup_ids_meta_page()
6888 subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page; in rb_setup_ids_meta_page()
6889 cpu_buffer->reader_page->id = id++; in rb_setup_ids_meta_page()
6896 subbuf_ids[id] = (unsigned long)subbuf->page; in rb_setup_ids_meta_page()
6897 subbuf->id = id; in rb_setup_ids_meta_page()
6904 cpu_buffer->subbuf_ids = subbuf_ids; in rb_setup_ids_meta_page()
6906 meta->meta_struct_len = sizeof(*meta); in rb_setup_ids_meta_page()
6907 meta->nr_subbufs = nr_subbufs; in rb_setup_ids_meta_page()
6908 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_setup_ids_meta_page()
6909 meta->meta_page_size = meta->subbuf_size; in rb_setup_ids_meta_page()
6919 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in rb_get_mapped_buffer()
6920 return ERR_PTR(-EINVAL); in rb_get_mapped_buffer()
6922 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
6924 mutex_lock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
6926 if (!cpu_buffer->user_mapped) { in rb_get_mapped_buffer()
6927 mutex_unlock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
6928 return ERR_PTR(-ENODEV); in rb_get_mapped_buffer()
6936 mutex_unlock(&cpu_buffer->mapping_lock); in rb_put_mapped_buffer()
6940 * Fast-path for ring_buffer_(un)map(). Called whenever the meta-page doesn't need
6941 * to be set up or torn down.
6948 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_inc_dec_mapped()
6951 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped)) in __rb_inc_dec_mapped()
6952 return -EINVAL; in __rb_inc_dec_mapped()
6954 if (inc && cpu_buffer->mapped == UINT_MAX) in __rb_inc_dec_mapped()
6955 return -EBUSY; in __rb_inc_dec_mapped()
6957 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0)) in __rb_inc_dec_mapped()
6958 return -EINVAL; in __rb_inc_dec_mapped()
6960 mutex_lock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
6961 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
6964 cpu_buffer->user_mapped++; in __rb_inc_dec_mapped()
6965 cpu_buffer->mapped++; in __rb_inc_dec_mapped()
6967 cpu_buffer->user_mapped--; in __rb_inc_dec_mapped()
6968 cpu_buffer->mapped--; in __rb_inc_dec_mapped()
6971 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
6972 mutex_unlock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
6978 * +--------------+ pgoff == 0
6980 * +--------------+ pgoff == 1
6983 * +--------------+ pgoff == (1 + (1 << subbuf_order))
6992 unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff; in __rb_map_vma()
6999 if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC || in __rb_map_vma()
7000 !(vma->vm_flags & VM_MAYSHARE)) in __rb_map_vma()
7001 return -EPERM; in __rb_map_vma()
7003 subbuf_order = cpu_buffer->buffer->subbuf_order; in __rb_map_vma()
7007 return -EINVAL; in __rb_map_vma()
7016 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_map_vma()
7018 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */ in __rb_map_vma()
7019 nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */ in __rb_map_vma()
7021 return -EINVAL; in __rb_map_vma()
7023 nr_pages -= pgoff; in __rb_map_vma()
7027 return -EINVAL; in __rb_map_vma()
7033 return -ENOMEM; in __rb_map_vma()
7038 pages[p++] = virt_to_page(cpu_buffer->meta_page); in __rb_map_vma()
7041 * Pad with the zero-page to align the meta-page with the in __rb_map_vma()
7042 * sub-buffers. in __rb_map_vma()
7044 meta_page_padding = subbuf_pages - 1; in __rb_map_vma()
7045 while (meta_page_padding-- && p < nr_pages) { in __rb_map_vma()
7047 vma->vm_start + (PAGE_SIZE * p); in __rb_map_vma()
7052 /* Skip the meta-page */ in __rb_map_vma()
7053 pgoff -= subbuf_pages; in __rb_map_vma()
7063 err = -EINVAL; in __rb_map_vma()
7067 page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]); in __rb_map_vma()
7078 err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages); in __rb_map_vma()
7089 return -EOPNOTSUPP; in __rb_map_vma()
7100 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_map()
7101 return -EINVAL; in ring_buffer_map()
7103 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7105 mutex_lock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7107 if (cpu_buffer->user_mapped) { in ring_buffer_map()
7111 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7115 /* prevent another thread from changing buffer/sub-buffer sizes */ in ring_buffer_map()
7116 mutex_lock(&buffer->mutex); in ring_buffer_map()
7123 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL); in ring_buffer_map()
7126 err = -ENOMEM; in ring_buffer_map()
7130 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_map()
7136 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7139 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7143 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7145 cpu_buffer->mapped++; in ring_buffer_map()
7146 cpu_buffer->user_mapped = 1; in ring_buffer_map()
7147 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7149 kfree(cpu_buffer->subbuf_ids); in ring_buffer_map()
7150 cpu_buffer->subbuf_ids = NULL; in ring_buffer_map()
7152 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_map()
7156 mutex_unlock(&buffer->mutex); in ring_buffer_map()
7157 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7168 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_unmap()
7169 return -EINVAL; in ring_buffer_unmap()
7171 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7173 mutex_lock(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7175 if (!cpu_buffer->user_mapped) { in ring_buffer_unmap()
7176 err = -ENODEV; in ring_buffer_unmap()
7178 } else if (cpu_buffer->user_mapped > 1) { in ring_buffer_unmap()
7183 mutex_lock(&buffer->mutex); in ring_buffer_unmap()
7184 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7187 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped)) in ring_buffer_unmap()
7188 cpu_buffer->mapped--; in ring_buffer_unmap()
7189 cpu_buffer->user_mapped = 0; in ring_buffer_unmap()
7191 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7193 kfree(cpu_buffer->subbuf_ids); in ring_buffer_unmap()
7194 cpu_buffer->subbuf_ids = NULL; in ring_buffer_unmap()
7196 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_unmap()
7198 mutex_unlock(&buffer->mutex); in ring_buffer_unmap()
7201 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7218 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7224 reader_size = rb_page_size(cpu_buffer->reader_page); in ring_buffer_map_get_reader()
7231 if (cpu_buffer->reader_page->read < reader_size) { in ring_buffer_map_get_reader()
7232 while (cpu_buffer->reader_page->read < reader_size) in ring_buffer_map_get_reader()
7242 missed_events = cpu_buffer->lost_events; in ring_buffer_map_get_reader()
7244 if (cpu_buffer->reader_page != cpu_buffer->commit_page) { in ring_buffer_map_get_reader()
7246 struct buffer_data_page *bpage = reader->page; in ring_buffer_map_get_reader()
7253 if (reader->real_end) in ring_buffer_map_get_reader()
7254 local_set(&bpage->commit, reader->real_end); in ring_buffer_map_get_reader()
7260 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_map_get_reader()
7261 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_map_get_reader()
7263 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_map_get_reader()
7265 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_map_get_reader()
7275 cpu_buffer->lost_events = 0; in ring_buffer_map_get_reader()
7280 /* Some archs do not have data cache coherency between kernel and user-space */ in ring_buffer_map_get_reader()
7281 flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page)); in ring_buffer_map_get_reader()
7285 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7304 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
7313 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
7314 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
7322 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
7324 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
7327 return -ENOMEM; in trace_rb_cpu_prepare()
7330 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
7398 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
7401 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
7409 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
7414 data->bytes_dropped += len; in rb_write_something()
7416 data->bytes_dropped_nested += len; in rb_write_something()
7423 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
7427 item->size = size; in rb_write_something()
7428 memcpy(item->str, rb_string, size); in rb_write_something()
7431 data->bytes_alloc_nested += event_len; in rb_write_something()
7432 data->bytes_written_nested += len; in rb_write_something()
7433 data->events_nested++; in rb_write_something()
7434 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
7435 data->min_size_nested = len; in rb_write_something()
7436 if (len > data->max_size_nested) in rb_write_something()
7437 data->max_size_nested = len; in rb_write_something()
7439 data->bytes_alloc += event_len; in rb_write_something()
7440 data->bytes_written += len; in rb_write_something()
7441 data->events++; in rb_write_something()
7442 if (!data->min_size || len < data->min_size) in rb_write_something()
7443 data->min_size = len; in rb_write_something()
7444 if (len > data->max_size) in rb_write_something()
7445 data->max_size = len; in rb_write_something()
7449 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
7460 data->cnt++; in rb_test()
7463 /* Sleep for a minimum of 100, 200 or 300us (rotating with cnt) and a max of 1ms */ in rb_test()
7464 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
7582 ret = -1; in test_ringbuffer()
7584 total_events = data->events + data->events_nested; in test_ringbuffer()
7585 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
7586 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
7587 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
7589 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
7590 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
7609 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
7610 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
7612 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
7613 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
7615 ret = -1; in test_ringbuffer()
7623 ret = -1; in test_ringbuffer()