Lines Matching full:buffer

3  * Generic ring buffer
39 * The "absolute" timestamp in the buffer is only 59 bits.
67 * The ring buffer header is special. We must manually keep it up to date.
89 * The ring buffer is made up of a list of pages. A separate list of pages is
90 * allocated for each CPU. A writer may only write to a buffer that is
92 * from any per cpu buffer.
94 * The reader is special. For each per cpu buffer, the reader has its own
96 * page is swapped with another page in the ring buffer.
100 * again (as long as it is out of the ring buffer).
105 * |reader| RING BUFFER
116 * |reader| RING BUFFER
127 * |reader| RING BUFFER
138 * |buffer| RING BUFFER
150 * and swap that into the ring buffer.
288 /* inline for ring buffer fast paths */
312 #define for_each_buffer_cpu(buffer, cpu) \ argument
313 for_each_cpu(cpu, buffer->cpumask)
315 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
316 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
343 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
352 * Note, the buffer_page list must be first. The buffer pages
353 * are allocated in cache lines, which means that each buffer
356 * add flags in the list struct pointers, to make the ring buffer
360 struct list_head list; /* list of buffer pages */
372 * The buffer page counters, write and entries, must be reset
439 * ABSOLUTE - the buffer requests all events to have absolute time stamps
475 * head_page == tail_page && head == tail then buffer is empty.
481 struct trace_buffer *buffer; member
526 /* ring buffer pages to update, > 0 to add, < 0 to remove */
579 int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s) in ring_buffer_print_page_header() argument
603 (unsigned int)buffer->subbuf_size, in ring_buffer_print_page_header()
621 * is on the buffer that it passed in.
678 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
682 * @buffer: The buffer that the event is on
686 * committed to the ring buffer. And must be called from the same
693 * the max nesting, then the write_stamp of the buffer is returned,
697 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, in ring_buffer_event_time_stamp() argument
700 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
729 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
730 * @buffer: The ring_buffer to get the number of pages from
733 * Returns the number of pages that have content in the ring buffer.
735 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
741 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
742 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
743 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
759 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
770 * Add one as dirty will never equal nr_pages, as the sub-buffer in full_hit()
774 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
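Taken together, these counters let a caller estimate how full a per-CPU buffer is. A minimal sketch, assuming ring_buffer_nr_pages() is available next to ring_buffer_nr_dirty_pages() in <linux/ring_buffer.h> (it does not appear in the matches above):

    #include <linux/ring_buffer.h>

    /* Hypothetical helper: rough percentage of sub-buffers holding unread data. */
    static int my_percent_full(struct trace_buffer *buffer, int cpu)
    {
            size_t dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
            size_t nr_pages = ring_buffer_nr_pages(buffer, cpu);   /* assumed helper */

            /* Like full_hit() above, count the reader page as one more dirty page. */
            return nr_pages ? (dirty + 1) * 100 / nr_pages : 0;
    }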
780 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
783 * ring buffer waiters queue.
812 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
813 * @buffer: The ring buffer to wake waiters on
814 * @cpu: The CPU buffer to wake waiters on
816 * When a file that represents a ring buffer is closing,
819 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
824 if (!buffer) in ring_buffer_wake_waiters()
830 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
831 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
833 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
835 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
840 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
841 /* The CPU buffer may not have been initialized yet */ in ring_buffer_wake_waiters()
851 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) in rb_watermark_hit() argument
858 return !ring_buffer_empty(buffer); in rb_watermark_hit()
860 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
862 if (!ring_buffer_empty_cpu(buffer, cpu)) { in rb_watermark_hit()
871 ret = !pagebusy && full_hit(buffer, cpu, full); in rb_watermark_hit()
883 rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer, in rb_wait_cond() argument
886 if (rb_watermark_hit(buffer, cpu, full)) in rb_wait_cond()
899 * We don't clear it even if the buffer is no longer in rb_wait_cond()
938 * ring_buffer_wait - wait for input to the ring buffer
939 * @buffer: buffer to wait on
940 * @cpu: the cpu buffer to wait on
946 * as data is added to any of the @buffer's cpu buffers. Otherwise
947 * it will wait for data to be added to a specific cpu buffer.
949 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, in ring_buffer_wait() argument
960 * data in any cpu buffer, or a specific buffer, put the in ring_buffer_wait()
964 rbwork = &buffer->irq_work; in ring_buffer_wait()
968 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
970 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
988 rb_wait_cond(rbwork, buffer, cpu, full, cond, data)); in ring_buffer_wait()
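A minimal sketch of a blocking reader built on ring_buffer_wait(); it assumes the trailing cond/data parameters (visible in the rb_wait_cond() call above) may be passed as NULL to get the default wake-on-new-data behaviour, and my_consume_cpu() is a hypothetical consumer:

    /* Hypothetical: block until cpu 0's buffer is at least half full, then drain it. */
    static void my_wait_and_drain(struct trace_buffer *buffer)
    {
            if (ring_buffer_wait(buffer, 0, 50, NULL, NULL) == 0)
                    my_consume_cpu(buffer, 0);      /* hypothetical consumer */
    }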
994 * ring_buffer_poll_wait - poll on buffer input
995 * @buffer: buffer to wait on
996 * @cpu: the cpu buffer to wait on
1002 * as data is added to any of the @buffer's cpu buffers. Otherwise
1003 * it will wait for data to be added to a specific cpu buffer.
1008 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1015 rbwork = &buffer->irq_work; in ring_buffer_poll_wait()
1018 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1028 if (rb_watermark_hit(buffer, cpu, full)) in ring_buffer_poll_wait()
1034 * compare the amount in the ring buffer to shortest_full. in ring_buffer_poll_wait()
1035 * If the amount in the ring buffer is greater than the in ring_buffer_poll_wait()
1052 * checking if the ring buffer is empty. Once the waiters_pending bit in ring_buffer_poll_wait()
1059 * the buffer goes from empty to having content. But as this race is in ring_buffer_poll_wait()
1065 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1066 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
1071 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1079 atomic_inc(&__b->buffer->record_disabled); \
1090 static inline u64 rb_time_stamp(struct trace_buffer *buffer) in rb_time_stamp() argument
1095 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1098 ts = buffer->clock(); in rb_time_stamp()
1104 u64 ring_buffer_time_stamp(struct trace_buffer *buffer) in ring_buffer_time_stamp() argument
1109 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
1116 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
1125 * Making the ring buffer lockless makes things tricky.
1130 * The reader page is always off the ring buffer, but when the
1132 * a new one from the buffer. The reader needs to take from
1182 * the reader page with a page in the buffer, but before it
1505 * rb_check_pages - integrity check of buffer pages
1506 * @cpu_buffer: CPU buffer with pages to test
1519 * Walk the linked list underpinning the ring buffer and validate all in rb_check_pages()
1526 * ring buffer size. Therefore, the code releases and re-acquires the in rb_check_pages()
1590 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu) in rb_range_meta() argument
1592 int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_range_meta()
1598 bmeta = buffer->meta; in rb_range_meta()
1652 * Return a specific sub-buffer for a given @cpu defined by @idx.
1660 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1669 /* Map this buffer to the order that's in meta->buffers[] */ in rb_range_buffer()
1675 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) in rb_range_buffer()
1685 static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size) in rb_meta_init() argument
1687 unsigned long ptr = buffer->range_addr_start; in rb_meta_init()
1693 buffer->meta = bmeta; in rb_meta_init()
1695 total_size = buffer->range_addr_end - buffer->range_addr_start; in rb_meta_init()
1700 /* The first buffer will start word size after the meta page */ in rb_meta_init()
1706 pr_info("Ring buffer boot meta mismatch of magic\n"); in rb_meta_init()
1711 pr_info("Ring buffer boot meta mismatch of struct size\n"); in rb_meta_init()
1716 pr_info("Ring buffer boot meta mismatch of total size\n"); in rb_meta_init()
1721 pr_info("Ring buffer boot meta mismatch of offset outside of total size\n"); in rb_meta_init()
1726 pr_info("Ring buffer boot meta mismatch of first buffer offset\n"); in rb_meta_init()
1745 * See if the existing memory contains valid ring buffer data.
1751 struct trace_buffer *buffer, int nr_pages, in rb_cpu_meta_valid() argument
1769 pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu); in rb_cpu_meta_valid()
1775 pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu); in rb_cpu_meta_valid()
1787 pr_info("Ring buffer boot meta [%d] array out of range\n", cpu); in rb_cpu_meta_valid()
1792 pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu); in rb_cpu_meta_valid()
1797 pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu); in rb_cpu_meta_valid()
1889 pr_info("Ring buffer reader page is invalid\n"); in rb_meta_validate_events()
1935 pr_info("Ring buffer [%d] rewound %d pages\n", cpu_buffer->cpu, i); in rb_meta_validate_events()
1942 * If the ring buffer was rewound, then inject the reader page in rb_meta_validate_events()
2004 pr_info("Ring buffer meta [%d] invalid buffer page\n", in rb_meta_validate_events()
2009 /* If the buffer has content, update pages_touched */ in rb_meta_validate_events()
2022 pr_info("Ring buffer meta [%d] commit page not found\n", in rb_meta_validate_events()
2030 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
2049 static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size) in rb_range_meta_init() argument
2063 if (rb_meta_init(buffer, scratch_size)) in rb_range_meta_init()
2069 meta = rb_range_meta(buffer, nr_pages, cpu); in rb_range_meta_init()
2071 if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) { in rb_range_meta_init()
2082 next_meta = rb_range_meta(buffer, nr_pages, cpu + 1); in rb_range_meta_init()
2084 next_meta = (void *)buffer->range_addr_end; in rb_range_meta_init()
2099 * location of the ring buffer. Although their addresses in rb_range_meta_init()
2154 seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]); in rbm_show()
2170 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu) in ring_buffer_meta_seq_init() argument
2180 m->private = buffer->buffers[cpu]; in ring_buffer_meta_seq_init()
2203 struct trace_buffer *buffer = cpu_buffer->buffer; in __rb_allocate_pages() local
2240 if (buffer->range_addr_start) in __rb_allocate_pages()
2241 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2260 /* A range was given. Use that for the buffer page */ in __rb_allocate_pages()
2272 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
2278 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2310 * The ring buffer page list is a circular list that does not in rb_allocate_pages()
2325 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
2339 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
2341 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
2359 if (buffer->range_addr_start) { in rb_allocate_cpu_buffer()
2365 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2375 cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
2397 pr_warn("Ring buffer meta buffers not all mapped\n"); in rb_allocate_cpu_buffer()
2415 /* The valid meta buffer still needs to activate the head page */ in rb_allocate_cpu_buffer()
2458 struct trace_buffer *buffer __free(kfree) = NULL; in alloc_buffer()
2466 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in alloc_buffer()
2468 if (!buffer) in alloc_buffer()
2471 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in alloc_buffer()
2474 buffer->subbuf_order = order; in alloc_buffer()
2476 buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE; in alloc_buffer()
2478 /* Max payload is buffer page size - header (8 bytes) */ in alloc_buffer()
2479 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2); in alloc_buffer()
2481 buffer->flags = flags; in alloc_buffer()
2482 buffer->clock = trace_clock_local; in alloc_buffer()
2483 buffer->reader_lock_key = key; in alloc_buffer()
2485 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in alloc_buffer()
2486 init_waitqueue_head(&buffer->irq_work.waiters); in alloc_buffer()
2488 buffer->cpus = nr_cpu_ids; in alloc_buffer()
2491 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in alloc_buffer()
2493 if (!buffer->buffers) in alloc_buffer()
2508 /* Subtract the buffer meta data and keep it word aligned */ in alloc_buffer()
2547 buffer->range_addr_start = start; in alloc_buffer()
2548 buffer->range_addr_end = end; in alloc_buffer()
2550 rb_range_meta_init(buffer, nr_pages, scratch_size); in alloc_buffer()
2554 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in alloc_buffer()
2560 cpumask_set_cpu(cpu, buffer->cpumask); in alloc_buffer()
2561 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in alloc_buffer()
2562 if (!buffer->buffers[cpu]) in alloc_buffer()
2565 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in alloc_buffer()
2569 mutex_init(&buffer->mutex); in alloc_buffer()
2571 return_ptr(buffer); in alloc_buffer()
2574 for_each_buffer_cpu(buffer, cpu) { in alloc_buffer()
2575 if (buffer->buffers[cpu]) in alloc_buffer()
2576 rb_free_cpu_buffer(buffer->buffers[cpu]); in alloc_buffer()
2578 kfree(buffer->buffers); in alloc_buffer()
2581 free_cpumask_var(buffer->cpumask); in alloc_buffer()
2589 * @flags: attributes to set for the ring buffer.
2590 * @key: ring buffer reader_lock_key.
2593 * flag. This flag means that the buffer will overwrite old data
2594 * when the buffer wraps. If this flag is not set, the buffer will
2600 /* Default buffer page size - one system page */ in __ring_buffer_alloc()
2609 * @flags: attributes to set for the ring buffer.
2610 * @order: sub-buffer order
2614 * @key: ring buffer reader_lock_key.
2617 * flag. This flag means that the buffer will overwrite old data
2618 * when the buffer wraps. If this flag is not set, the buffer will
2631 void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size) in ring_buffer_meta_scratch() argument
2636 if (!buffer || !buffer->meta) in ring_buffer_meta_scratch()
2639 meta = buffer->meta; in ring_buffer_meta_scratch()
2650 * ring_buffer_free - free a ring buffer.
2651 * @buffer: the buffer to free.
2654 ring_buffer_free(struct trace_buffer *buffer) in ring_buffer_free() argument
2658 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
2660 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
2662 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
2663 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
2665 kfree(buffer->buffers); in ring_buffer_free()
2666 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
2668 kfree(buffer); in ring_buffer_free()
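Allocation and tear-down pair up as in this minimal sketch; ring_buffer_alloc() and RB_FL_OVERWRITE are used the same way in the self-test near the end of the file, and my_buffer/my_buffer_init() are hypothetical names:

    #include <linux/ring_buffer.h>

    static struct trace_buffer *my_buffer;          /* hypothetical holder */

    static int my_buffer_init(void)
    {
            /* One megabyte per CPU; overwrite the oldest data when a buffer fills. */
            my_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
            return my_buffer ? 0 : -ENOMEM;
    }

    static void my_buffer_exit(void)
    {
            ring_buffer_free(my_buffer);
    }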
2672 void ring_buffer_set_clock(struct trace_buffer *buffer, in ring_buffer_set_clock() argument
2675 buffer->clock = clock; in ring_buffer_set_clock()
2678 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) in ring_buffer_set_time_stamp_abs() argument
2680 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
2683 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) in ring_buffer_time_stamp_abs() argument
2685 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
2725 * from the ring buffer in rb_remove_pages()
2754 /* make sure pages points to a valid page in the ring buffer */ in rb_remove_pages()
2769 /* last buffer page to remove */ in rb_remove_pages()
2786 * bytes consumed in ring buffer from here. in rb_remove_pages()
2820 * in the ring buffer. Now we are racing with the writer trying to in rb_insert_pages()
2913 * ring_buffer_resize - resize the ring buffer
2914 * @buffer: the buffer to resize.
2916 * @cpu_id: the cpu buffer to resize
2918 * Minimum size is 2 * buffer->subbuf_size.
2922 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
2930 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2932 if (!buffer) in ring_buffer_resize()
2935 /* Make sure the requested buffer exists */ in ring_buffer_resize()
2937 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2940 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in ring_buffer_resize()
2952 /* prevent another thread from changing buffer sizes */ in ring_buffer_resize()
2953 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2954 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2959 * manipulating the ring buffer and is expecting a sane state while in ring_buffer_resize()
2962 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2971 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2972 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2999 * since we can change their buffer sizes without any race. in ring_buffer_resize()
3001 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3002 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3025 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3026 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3036 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
3043 * manipulating the ring buffer and is expecting a sane state while in ring_buffer_resize()
3084 * The ring buffer resize can happen with the ring buffer in ring_buffer_resize()
3086 * as possible. But if the buffer is disabled, we do not need in ring_buffer_resize()
3088 * that the buffer is not corrupt. in ring_buffer_resize()
3090 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
3091 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
3093 * Even though the buffer was disabled, we must make sure in ring_buffer_resize()
3099 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3103 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
3106 atomic_dec(&buffer->resizing); in ring_buffer_resize()
3107 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
3111 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3114 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3127 atomic_dec(&buffer->resizing); in ring_buffer_resize()
3128 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
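A minimal sketch of calling the resize path above; the size is given in bytes and rounded up to whole sub-buffers internally, and RING_BUFFER_ALL_CPUS selects every per-CPU buffer:

    /* Grow every per-CPU buffer to roughly 4 MB (rounded up to sub-buffers). */
    int err = ring_buffer_resize(buffer, 4 << 20, RING_BUFFER_ALL_CPUS);

    if (!err)
            /* Shrink just CPU 2 back down to 64 KB. */
            err = ring_buffer_resize(buffer, 64 << 10, 2);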
3133 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
3135 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
3137 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
3139 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
3140 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
3232 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
3257 /* Return the index into the sub-buffers for a given sub-buffer */
3471 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
3540 /* Set write to end of buffer */ in rb_reset_tail()
3556 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
3566 * it all the way around the buffer, bail, and warn in rb_move_tail()
3579 * page with the buffer head. in rb_move_tail()
3585 * the buffer, unless the commit page is still on the in rb_move_tail()
3599 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
3615 * page. We could have a small buffer, and in rb_move_tail()
3616 * have filled up the buffer with events in rb_move_tail()
3714 * is added to the buffer, it will lose those bits. in rb_add_timestamp()
3730 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
3745 * @cpu_buffer: The per cpu buffer of the @event
3750 * is the actual size that is written to the ring buffer,
3825 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3999 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
4001 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
4002 buffer->irq_work.waiters_pending = false; in rb_wakeups()
4004 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
4024 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
4089 * if an interrupt comes in while NORMAL bit is set and the ring buffer
4141 * @buffer: The ring buffer to modify
4143 * The ring buffer has a safety mechanism to prevent recursion.
4152 void ring_buffer_nest_start(struct trace_buffer *buffer) in ring_buffer_nest_start() argument
4160 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
4167 * @buffer: The ring buffer to modify
4172 void ring_buffer_nest_end(struct trace_buffer *buffer) in ring_buffer_nest_end() argument
4179 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
4187 * @buffer: The buffer to commit to
4189 * This commits the data to the ring buffer, and releases any locks held.
4193 int ring_buffer_unlock_commit(struct trace_buffer *buffer) in ring_buffer_unlock_commit() argument
4198 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
4202 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
4359 * the buffer page.
4437 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4448 /* Use the sub-buffer timestamp */ in __rb_reserve_next()
4471 /* See if we shot past the end of this buffer page */ in __rb_reserve_next()
4472 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
4507 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4544 /* We reserved something on the buffer */ in __rb_reserve_next()
4565 rb_reserve_next_event(struct trace_buffer *buffer, in rb_reserve_next_event() argument
4575 * ring buffer does cmpxchg as well as atomic64 operations in rb_reserve_next_event()
4590 * Due to the ability to swap a cpu buffer from a buffer in rb_reserve_next_event()
4596 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
4605 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
4608 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
4646 * ring_buffer_lock_reserve - reserve a part of the buffer
4647 * @buffer: the ring buffer to reserve from
4650 * Returns a reserved event on the ring buffer to copy directly to.
4661 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
4670 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
4675 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
4678 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4683 if (unlikely(length > buffer->max_data_size)) in ring_buffer_lock_reserve()
4689 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
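The reserve/commit pair above is the usual write path: reserve space, fill the returned event, then commit. A minimal sketch, assuming ring_buffer_event_data() from <linux/ring_buffer.h> (not among the matches) and a hypothetical struct my_record payload:

    #include <linux/ring_buffer.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct my_record {                      /* hypothetical payload layout */
            u64     ts;
            u32     id;
    };

    static int my_write_record(struct trace_buffer *buffer,
                               const struct my_record *rec)
    {
            struct ring_buffer_event *event;

            event = ring_buffer_lock_reserve(buffer, sizeof(*rec));
            if (!event)
                    return -EBUSY;          /* disabled, or no room and no overwrite */

            memcpy(ring_buffer_event_data(event), rec, sizeof(*rec));

            return ring_buffer_unlock_commit(buffer);
    }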
4717 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
4739 /* commit not part of this buffer?? */ in rb_decrement_entry()
4745 * @buffer: the ring buffer
4748 * Sometimes an event that is in the ring buffer needs to be ignored.
4749 * This function lets the user discard an event in the ring buffer
4753 * committed. It will try to free the event from the ring buffer
4762 void ring_buffer_discard_commit(struct trace_buffer *buffer, in ring_buffer_discard_commit() argument
4772 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4779 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
4793 * ring_buffer_write - write data to the buffer without reserving
4794 * @buffer: The ring buffer to write to.
4796 * @data: The data to write to the buffer.
4799 * one function. If you already have the data to write to the buffer, it
4805 int ring_buffer_write(struct trace_buffer *buffer, in ring_buffer_write() argument
4817 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
4822 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
4825 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
4830 if (length > buffer->max_data_size) in ring_buffer_write()
4836 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
4846 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
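When the payload already exists, the reserve and commit steps collapse into the single call documented above. A minimal sketch; ktime_get_ns() merely supplies an example payload:

    u64 stamp = ktime_get_ns();

    /* Reserve, copy and commit in a single call. */
    if (ring_buffer_write(buffer, sizeof(stamp), &stamp))
            pr_warn("ring buffer write failed\n");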
4857 * The total number of entries in the ring buffer is the running counter
4858 * of entries entered into the ring buffer, minus the sum of
4859 * the entries read from the ring buffer and the number of
4875 * ring_buffer_record_disable - stop all writes into the buffer
4876 * @buffer: The ring buffer to stop writes to.
4878 * This prevents all writes to the buffer. Any attempt to write
4879 * to the buffer after this will fail and return NULL.
4883 void ring_buffer_record_disable(struct trace_buffer *buffer) in ring_buffer_record_disable() argument
4885 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4890 * ring_buffer_record_enable - enable writes to the buffer
4891 * @buffer: The ring buffer to enable writes
4896 void ring_buffer_record_enable(struct trace_buffer *buffer) in ring_buffer_record_enable() argument
4898 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4903 * ring_buffer_record_off - stop all writes into the buffer
4904 * @buffer: The ring buffer to stop writes to.
4906 * This prevents all writes to the buffer. Any attempt to write
4907 * to the buffer after this will fail and return NULL.
4913 void ring_buffer_record_off(struct trace_buffer *buffer) in ring_buffer_record_off() argument
4918 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4921 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
4926 * ring_buffer_record_on - restart writes into the buffer
4927 * @buffer: The ring buffer to start writes to.
4929 * This enables all writes to the buffer that was disabled by
4936 void ring_buffer_record_on(struct trace_buffer *buffer) in ring_buffer_record_on() argument
4941 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4944 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
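The two pairs differ in behaviour: record_disable()/record_enable() form a nesting counter, while record_off()/record_on() set and clear a sticky RB_BUFFER_OFF bit that the counter cannot override. A minimal sketch of the counted form, following the "disable, then synchronize_rcu()" rule noted near the reset code further down; my_inspect_buffer() is hypothetical:

    /* Quiesce writers around an inspection of the buffer. */
    ring_buffer_record_disable(buffer);
    synchronize_rcu();                      /* wait for in-flight writers to finish */

    my_inspect_buffer(buffer);              /* hypothetical read-side check */

    ring_buffer_record_enable(buffer);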
4949 * ring_buffer_record_is_on - return true if the ring buffer can write
4950 * @buffer: The ring buffer to see if write is enabled
4952 * Returns true if the ring buffer is in a state where it accepts writes.
4954 bool ring_buffer_record_is_on(struct trace_buffer *buffer) in ring_buffer_record_is_on() argument
4956 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4960 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4961 * @buffer: The ring buffer to see if write is set enabled
4963 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4966 * It may return true when the ring buffer has been disabled by
4968 * the ring buffer.
4970 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) in ring_buffer_record_is_set_on() argument
4972 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
4976 * ring_buffer_record_is_on_cpu - return true if the ring buffer can write
4977 * @buffer: The ring buffer to see if write is enabled
4978 * @cpu: The CPU to test if the ring buffer can write to
4980 * Returns true if the ring buffer is in a state where it accepts writes
4983 bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_is_on_cpu() argument
4987 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_is_on_cpu()
4989 return ring_buffer_record_is_set_on(buffer) && in ring_buffer_record_is_on_cpu()
4995 * @buffer: The ring buffer to stop writes to.
4996 * @cpu: The CPU buffer to stop
4998 * This prevents all writes to the buffer. Any attempt to write
4999 * to the buffer after this will fail and return NULL.
5003 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
5007 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
5010 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
5016 * ring_buffer_record_enable_cpu - enable writes to the buffer
5017 * @buffer: The ring buffer to enable writes
5023 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
5027 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
5030 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
5036 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
5037 * @buffer: The ring buffer
5038 * @cpu: The per CPU buffer to read from.
5040 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
5047 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
5050 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
5069 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
5070 * @buffer: The ring buffer
5071 * @cpu: The per CPU buffer to read from.
5073 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
5078 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
5081 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
5089 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
5090 * @buffer: The ring buffer
5091 * @cpu: The per CPU buffer to get the entries from.
5093 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
5097 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
5100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
5108 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
5109 * @buffer: The ring buffer
5110 * @cpu: The per CPU buffer to get the number of overruns from
5112 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
5117 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
5120 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
5129 * commits failing due to the buffer wrapping around while there are uncommitted
5131 * @buffer: The ring buffer
5132 * @cpu: The per CPU buffer to get the number of overruns from
5135 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
5140 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
5143 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
5152 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
5153 * @buffer: The ring buffer
5154 * @cpu: The per CPU buffer to get the number of overruns from
5157 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
5162 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
5165 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
5174 * @buffer: The ring buffer
5175 * @cpu: The per CPU buffer to get the number of events read
5178 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
5182 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
5185 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
5191 * ring_buffer_entries - get the number of entries in a buffer
5192 * @buffer: The ring buffer
5194 * Returns the total number of entries in the ring buffer
5197 unsigned long ring_buffer_entries(struct trace_buffer *buffer) in ring_buffer_entries() argument
5203 /* if you care about this being correct, lock the buffer */ in ring_buffer_entries()
5204 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
5205 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5214 * ring_buffer_overruns - get the number of overruns in buffer
5215 * @buffer: The ring buffer
5217 * Returns the total number of overruns in the ring buffer
5220 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) in ring_buffer_overruns() argument
5226 /* if you care about this being correct, lock the buffer */ in ring_buffer_overruns()
5227 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
5228 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
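These counters combine into a quick utilization report, as in this minimal sketch:

    pr_info("ring buffer: %lu entries, %lu overruns, cpu0 holds %lu bytes\n",
            ring_buffer_entries(buffer),
            ring_buffer_overruns(buffer),
            ring_buffer_bytes_cpu(buffer, 0));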
5393 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
5430 /* Don't bother swapping if the ring buffer is empty */ in rb_get_reader_page()
5452 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
5453 * has no specific buffer page to point to. Let's move it out in rb_get_reader_page()
5568 /* This function should not be called when buffer is empty */ in rb_advance_reader()
5600 * Check if we are at the end of the buffer. in rb_advance_iter()
5667 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5677 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5695 struct trace_buffer *buffer; in rb_iter_peek() local
5704 buffer = cpu_buffer->buffer; in rb_iter_peek()
5707 * Check if someone performed a consuming read to the buffer in rb_iter_peek()
5708 * or removed some pages from the buffer. In these cases, in rb_iter_peek()
5724 * the ring buffer with an active write as the consumer is. in rb_iter_peek()
5760 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
5770 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
5791 * If an NMI die dumps out the content of the ring buffer in rb_reader_lock()
5793 * preempted a task that holds the ring buffer locks. If in rb_reader_lock()
5795 * to do the read, but this can corrupt the ring buffer, in rb_reader_lock()
5802 /* Continue without locking, but disable the ring buffer */ in rb_reader_lock()
5816 * @buffer: The ring buffer to read
5825 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
5828 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5833 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5852 * @iter: The ring buffer iterator
5867 * @iter: The ring buffer iterator
5893 * @buffer: The ring buffer to get the next event from
5894 * @cpu: the cpu to read the buffer from
5898 * Returns the next event in the ring buffer, and that event is consumed.
5900 * and eventually empty the ring buffer if the producer is slower.
5903 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5915 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5918 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
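A consuming read loop over one CPU follows the pattern below; my_handle_event() is a hypothetical callback, and ring_buffer_event_data()/ring_buffer_event_length() come from <linux/ring_buffer.h> but are not among the matches above:

    struct ring_buffer_event *event;
    unsigned long lost;
    u64 ts;

    while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
            if (lost)
                    pr_warn("lost %lu events\n", lost);
            my_handle_event(ring_buffer_event_data(event),
                            ring_buffer_event_length(event), ts);
    }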
5942 * ring_buffer_read_start - start a non consuming read of the buffer
5943 * @buffer: The ring buffer to read from
5944 * @cpu: The cpu buffer to iterate over
5948 * the buffer. If the buffer is disabled for writing, it will produce
5949 * the same information each time, but if the buffer is still writing
5955 ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_start() argument
5960 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_start()
5968 iter->event_size = buffer->subbuf_size; in ring_buffer_read_start()
5975 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_start()
5991 * ring_buffer_read_finish - finish reading the iterator of the buffer
5994 * This re-enables resizing of the buffer, and frees the iterator.
6001 /* Use this opportunity to check the integrity of the ring buffer. */ in ring_buffer_read_finish()
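A non-consuming pass uses the iterator returned by ring_buffer_read_start(). A minimal sketch, assuming the companion helpers ring_buffer_iter_empty(), ring_buffer_iter_peek() and ring_buffer_iter_advance() from <linux/ring_buffer.h> (none of which appear in the matches above):

    static void my_dump_cpu(struct trace_buffer *buffer, int cpu)
    {
            struct ring_buffer_iter *iter;
            struct ring_buffer_event *event;
            u64 ts;

            iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
            if (!iter)
                    return;

            while (!ring_buffer_iter_empty(iter)) {
                    event = ring_buffer_iter_peek(iter, &ts);
                    if (!event)
                            break;
                    pr_info("event at %llu, %u bytes\n",
                            ts, ring_buffer_event_length(event));
                    ring_buffer_iter_advance(iter);
            }

            ring_buffer_read_finish(iter);
    }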
6012 * @iter: The ring buffer iterator
6031 * ring_buffer_size - return the size of the ring buffer (in bytes)
6032 * @buffer: The ring buffer.
6033 * @cpu: The CPU to get ring buffer size from.
6035 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
6037 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
6040 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
6046 * @buffer: The ring buffer.
6050 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer) in ring_buffer_max_event_size() argument
6053 if (ring_buffer_time_stamp_abs(buffer)) in ring_buffer_max_event_size()
6054 return buffer->max_data_size - RB_LEN_TIME_EXTEND; in ring_buffer_max_event_size()
6055 return buffer->max_data_size; in ring_buffer_max_event_size()
6068 * When the buffer is memory mapped to user space, each sub buffer
6072 * For a normal allocated ring buffer, the id is saved in the buffer page
6075 * But for a fixed memory mapped buffer, the id is already assigned for
6079 * For the normal pages, set the buffer page id with the passed in @id
6090 * otherwise, set the buffer page with this id in rb_page_id()
6177 /* Must have disabled the cpu buffer then done a synchronize_rcu */
6193 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
6194 * @buffer: The ring buffer to reset a per cpu buffer of
6195 * @cpu: The CPU buffer to be reset
6197 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
6199 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
6201 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
6204 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset_cpu()
6205 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
6218 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
6226 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
6227 * @buffer: The ring buffer to reset a per cpu buffer of
6229 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) in ring_buffer_reset_online_cpus() argument
6234 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset_online_cpus()
6235 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
6237 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
6238 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6247 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
6248 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6263 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
6267 * ring_buffer_reset - reset a ring buffer
6268 * @buffer: The ring buffer to reset all cpu buffers
6270 void ring_buffer_reset(struct trace_buffer *buffer) in ring_buffer_reset() argument
6275 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset()
6276 mutex_lock(&buffer->mutex); in ring_buffer_reset()
6278 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
6279 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6288 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
6289 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6297 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
6302 * ring_buffer_empty - is the ring buffer empty?
6303 * @buffer: The ring buffer to test
6305 bool ring_buffer_empty(struct trace_buffer *buffer) in ring_buffer_empty() argument
6313 /* yes this is racy, but if you don't like the race, lock the buffer */ in ring_buffer_empty()
6314 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
6315 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6331 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
6332 * @buffer: The ring buffer
6333 * @cpu: The CPU buffer to test
6335 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
6342 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
6345 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6358 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
6359 * @buffer_a: One buffer to swap with
6360 * @buffer_b: The other buffer to swap with
6364 * of a CPU buffer and have another backup buffer lying around.
6365 * It is expected that the tracer handles the cpu buffer not being
6422 * it will mess the state of the cpu buffer. in ring_buffer_swap_cpu()
6432 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
6433 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
6446 * ring_buffer_alloc_read_page - allocate a page to read from buffer
6447 * @buffer: the buffer to allocate for.
6448 * @cpu: the cpu buffer to allocate.
6451 * When reading a full page from the ring buffer, these functions
6454 * needs to get pages from the ring buffer, it passes the result
6456 * the page that was allocated, with the read page of the buffer.
6462 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
6469 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
6476 bpage->order = buffer->subbuf_order; in ring_buffer_alloc_read_page()
6477 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6494 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6511 * @buffer: the buffer the page was allocate for
6512 * @cpu: the cpu buffer the page came from
6517 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, in ring_buffer_free_read_page() argument
6525 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
6528 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6532 * is different from the subbuffer order of the buffer - in ring_buffer_free_read_page()
6535 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order) in ring_buffer_free_read_page()
6556 * ring_buffer_read_page - extract a page from the ring buffer
6557 * @buffer: buffer to extract from
6560 * @cpu: the cpu of the buffer to extract
6563 * This function will pull out a page from the ring buffer and consume it.
6566 * to swap with a page in the ring buffer.
6569 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
6572 * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
6575 * ring_buffer_free_read_page(buffer, cpu, rpage);
6581 * The ring buffer can be used anywhere in the kernel and can not
6582 * blindly call wake_up. The layer that uses the ring buffer must be
6589 int ring_buffer_read_page(struct trace_buffer *buffer, in ring_buffer_read_page() argument
6593 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6602 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
6617 if (data_page->order != buffer->subbuf_order) in ring_buffer_read_page()
6642 * we must copy the data from the page to the buffer. in ring_buffer_read_page()
6740 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
6752 if (commit < buffer->subbuf_size) in ring_buffer_read_page()
6753 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); in ring_buffer_read_page()
6772 * ring_buffer_subbuf_size_get - get size of the sub buffer.
6773 * @buffer: the buffer to get the sub buffer size from
6775 * Returns size of the sub buffer, in bytes.
6777 int ring_buffer_subbuf_size_get(struct trace_buffer *buffer) in ring_buffer_subbuf_size_get() argument
6779 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_size_get()
6784 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
6785 * @buffer: The ring_buffer to get the system sub page order from
6787 * By default, one ring buffer sub page equals one system page. This parameter
6788 * is configurable, per ring buffer. The size of the ring buffer sub page can be
6791 * Returns the order of buffer sub page size, in system pages:
6792 * 0 means the sub buffer size is 1 system page and so forth.
6795 int ring_buffer_subbuf_order_get(struct trace_buffer *buffer) in ring_buffer_subbuf_order_get() argument
6797 if (!buffer) in ring_buffer_subbuf_order_get()
6800 return buffer->subbuf_order; in ring_buffer_subbuf_order_get()
6805 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
6806 * @buffer: The ring_buffer to set the new page size.
6807 * @order: Order of the system pages in one sub buffer page
6809 * By default, one ring buffer page equals one system page. This API can be
6810 * used to set a new size for the ring buffer page. The size must be an order of
6812 * system pages that are allocated for one ring buffer page:
6820 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order) in ring_buffer_subbuf_order_set() argument
6830 if (!buffer || order < 0) in ring_buffer_subbuf_order_set()
6833 if (buffer->subbuf_order == order) in ring_buffer_subbuf_order_set()
6844 old_order = buffer->subbuf_order; in ring_buffer_subbuf_order_set()
6845 old_size = buffer->subbuf_size; in ring_buffer_subbuf_order_set()
6847 /* prevent another thread from changing buffer sizes */ in ring_buffer_subbuf_order_set()
6848 guard(mutex)(&buffer->mutex); in ring_buffer_subbuf_order_set()
6849 atomic_inc(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6854 buffer->subbuf_order = order; in ring_buffer_subbuf_order_set()
6855 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_order_set()
6858 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6860 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6863 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6871 nr_pages = old_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_subbuf_order_set()
6872 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size); in ring_buffer_subbuf_order_set()
6883 /* Allocate the new size buffer */ in ring_buffer_subbuf_order_set()
6893 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6898 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6901 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6952 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6957 buffer->subbuf_order = old_order; in ring_buffer_subbuf_order_set()
6958 buffer->subbuf_size = old_size; in ring_buffer_subbuf_order_set()
6960 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6962 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
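Sub-buffer order and size can be queried and changed together, as in this minimal sketch:

    /* Switch to four system pages per sub-buffer (order 2), if not already set. */
    if (ring_buffer_subbuf_order_get(buffer) != 2) {
            int err = ring_buffer_subbuf_order_set(buffer, 2);

            if (err)
                    pr_warn("could not change sub-buffer order: %d\n", err);
    }

    pr_info("sub-buffer size is now %d bytes\n",
            ring_buffer_subbuf_size_get(buffer));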
7036 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_setup_ids_meta_page()
7043 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu) in rb_get_mapped_buffer() argument
7047 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in rb_get_mapped_buffer()
7050 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
7088 mutex_lock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7100 mutex_unlock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7131 subbuf_order = cpu_buffer->buffer->subbuf_order; in __rb_map_vma()
7216 int ring_buffer_map(struct trace_buffer *buffer, int cpu, in ring_buffer_map() argument
7223 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_map()
7226 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7237 /* prevent another thread from changing buffer/sub-buffer sizes */ in ring_buffer_map()
7238 guard(mutex)(&buffer->mutex); in ring_buffer_map()
7279 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu) in ring_buffer_unmap() argument
7284 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_unmap()
7287 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7298 guard(mutex)(&buffer->mutex); in ring_buffer_unmap()
7316 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) in ring_buffer_map_get_reader() argument
7324 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); in ring_buffer_map_get_reader()
7370 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_map_get_reader()
7384 * addition of an event and then filled the buffer in ring_buffer_map_get_reader()
7390 pr_info("Ring buffer [%d] commit overrun lost %ld events at timestamp:%lld\n", in ring_buffer_map_get_reader()
7402 buffer->subbuf_size + BUF_PAGE_HDR_SIZE); in ring_buffer_map_get_reader()
7414 * If we were to free the buffer, then the user would lose any trace that was in
7415 * the buffer.
7419 struct trace_buffer *buffer; in trace_rb_cpu_prepare() local
7424 buffer = container_of(node, struct trace_buffer, node); in trace_rb_cpu_prepare()
7425 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
7431 for_each_buffer_cpu(buffer, cpu_i) { in trace_rb_cpu_prepare()
7434 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
7435 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
7443 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
7444 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
7445 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
7446 WARN(1, "failed to allocate ring buffer on CPU %u\n", in trace_rb_cpu_prepare()
7451 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
7457 * This is a basic integrity check of the ring buffer.
7460 * writing to the per cpu ring buffer various sizes of data.
7464 * IPIs to the other CPUs to also write into the ring buffer.
7465 * This is to test the nesting ability of the buffer. in test_ringbuffer()
7468 * ring buffer should happen that's not expected, a big warning
7474 struct trace_buffer *buffer; member
7527 /* read rb_test_started before checking buffer enabled */ in rb_write_something()
7530 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
7544 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
7570 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
7616 struct trace_buffer *buffer; in test_ringbuffer() local
7621 pr_warn("Lockdown is enabled, skipping ring buffer tests\n"); in test_ringbuffer()
7625 pr_info("Running ring buffer tests...\n"); in test_ringbuffer()
7627 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
7628 if (WARN_ON(!buffer)) in test_ringbuffer()
7631 /* Disable buffer so that threads can't write to it yet */ in test_ringbuffer()
7632 ring_buffer_record_off(buffer); in test_ringbuffer()
7635 rb_data[cpu].buffer = buffer; in test_ringbuffer()
7655 ring_buffer_record_on(buffer); in test_ringbuffer()
7657 * Show buffer is enabled before setting rb_test_started. in test_ringbuffer()
7660 * buffer gets enabled, there will always be some kind of in test_ringbuffer()
7663 * the threads see that the buffer is active. in test_ringbuffer()
7681 ring_buffer_free(buffer); in test_ringbuffer()
7721 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
7726 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
7733 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
7735 RB_WARN_ON(buffer, 1); in test_ringbuffer()
7755 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
7759 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
7765 pr_info("Ring buffer PASSED!\n"); in test_ringbuffer()
7767 ring_buffer_free(buffer); in test_ringbuffer()