Lines Matching refs: cpu_buffer

565 	struct ring_buffer_per_cpu	*cpu_buffer;  member
626 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
629 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
630 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
653 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
700 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
707 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
710 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
711 verify_event(cpu_buffer, event); in ring_buffer_event_time_stamp()
717 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
723 rb_time_read(&cpu_buffer->write_stamp, &ts); in ring_buffer_event_time_stamp()
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
765 nr_pages = cpu_buffer->nr_pages; in full_hit()
795 struct ring_buffer_per_cpu *cpu_buffer = in rb_wake_up_waiters() local
799 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
804 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
805 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
821 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
840 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
842 if (!cpu_buffer) in ring_buffer_wake_waiters()
844 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
853 struct ring_buffer_per_cpu *cpu_buffer; in rb_watermark_hit() local
860 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
869 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
870 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
873 if (!ret && (!cpu_buffer->shortest_full || in rb_watermark_hit()
874 cpu_buffer->shortest_full > full)) { in rb_watermark_hit()
875 cpu_buffer->shortest_full = full; in rb_watermark_hit()
877 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
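The rb_watermark_hit() fragments above suggest that, while the requested watermark has not yet been reached, the smallest "full" percentage any waiter has asked for is remembered in shortest_full, with 0 acting as "unset". A minimal stand-alone sketch of that min-tracking rule follows; the names are hypothetical and this is not the kernel's code:

    /* Illustrative sketch only: hypothetical names, not kernel code. */
    #include <stdio.h>

    /* 0 means "no waiter has registered a threshold yet"; otherwise keep the
     * smallest percentage requested, so the earliest waiter is woken first. */
    static void update_shortest_full(int *shortest_full, int full)
    {
        if (!*shortest_full || *shortest_full > full)
            *shortest_full = full;
    }

    int main(void)
    {
        int shortest_full = 0;

        update_shortest_full(&shortest_full, 50);
        update_shortest_full(&shortest_full, 25);
        update_shortest_full(&shortest_full, 75);
        printf("shortest_full = %d\n", shortest_full); /* 25 */
        return 0;
    }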
952 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
970 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
971 rbwork = &cpu_buffer->irq_work; in ring_buffer_wait()
1011 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1022 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1263 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1267 head = cpu_buffer->head_page; in rb_head_page_activate()
1276 if (cpu_buffer->ring_meta) { in rb_head_page_activate()
1277 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_head_page_activate()
1293 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1298 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1300 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1304 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1327 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1332 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1336 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1341 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1345 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1350 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1369 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1376 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1380 list = cpu_buffer->pages; in rb_set_head_page()
1381 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1384 page = head = cpu_buffer->head_page; in rb_set_head_page()
1394 cpu_buffer->head_page = page; in rb_set_head_page()
1401 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
1421 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1451 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1477 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) in rb_tail_page_update()
1478 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1482 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1487 RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK); in rb_check_bpage()
1490 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_links() argument
1493 if (RB_WARN_ON(cpu_buffer, in rb_check_links()
1497 if (RB_WARN_ON(cpu_buffer, in rb_check_links()
1511 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1541 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1542 head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1543 if (!rb_check_links(cpu_buffer, head)) in rb_check_pages()
1545 buffer_cnt = cpu_buffer->cnt; in rb_check_pages()
1547 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1550 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1552 if (buffer_cnt != cpu_buffer->cnt) { in rb_check_pages()
1554 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1563 if (!rb_check_links(cpu_buffer, tmp)) in rb_check_pages()
1566 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1570 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1654 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) in rb_range_buffer() argument
1660 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1675 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) in rb_range_buffer()
1873 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_meta_validate_events() argument
1875 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_meta_validate_events()
1887 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1893 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); in rb_meta_validate_events()
1894 local_set(&cpu_buffer->reader_page->entries, ret); in rb_meta_validate_events()
1896 orig_head = head_page = cpu_buffer->head_page; in rb_meta_validate_events()
1903 if (head_page == cpu_buffer->tail_page) in rb_meta_validate_events()
1910 if (head_page == cpu_buffer->tail_page) in rb_meta_validate_events()
1923 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1930 local_inc(&cpu_buffer->pages_touched); in rb_meta_validate_events()
1935 pr_info("Ring buffer [%d] rewound %d pages\n", cpu_buffer->cpu, i); in rb_meta_validate_events()
1954 cpu_buffer->reader_page->list.next = &orig_head->list; in rb_meta_validate_events()
1955 cpu_buffer->reader_page->list.prev = orig_head->list.prev; in rb_meta_validate_events()
1956 orig_head->list.prev = &cpu_buffer->reader_page->list; in rb_meta_validate_events()
1957 bpage->list.next = &cpu_buffer->reader_page->list; in rb_meta_validate_events()
1960 cpu_buffer->reader_page = head_page; in rb_meta_validate_events()
1967 cpu_buffer->pages = &head_page->list; in rb_meta_validate_events()
1969 cpu_buffer->head_page = head_page; in rb_meta_validate_events()
1973 bpage = cpu_buffer->reader_page; in rb_meta_validate_events()
1989 if (meta->commit_buffer == (unsigned long)cpu_buffer->reader_page->page) { in rb_meta_validate_events()
1990 cpu_buffer->commit_page = cpu_buffer->reader_page; in rb_meta_validate_events()
1999 if (head_page == cpu_buffer->reader_page) in rb_meta_validate_events()
2002 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
2005 cpu_buffer->cpu); in rb_meta_validate_events()
2011 local_inc(&cpu_buffer->pages_touched); in rb_meta_validate_events()
2015 local_set(&cpu_buffer->head_page->entries, ret); in rb_meta_validate_events()
2017 if (head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
2021 if (head_page != cpu_buffer->commit_page) { in rb_meta_validate_events()
2023 cpu_buffer->cpu); in rb_meta_validate_events()
2027 local_set(&cpu_buffer->entries, entries); in rb_meta_validate_events()
2028 local_set(&cpu_buffer->entries_bytes, entry_bytes); in rb_meta_validate_events()
2030 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
2039 local_set(&cpu_buffer->reader_page->entries, 0); in rb_meta_validate_events()
2040 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_meta_validate_events()
2114 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start() local
2115 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rbm_start()
2139 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show() local
2140 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rbm_show()
2186 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_meta_buffer_update() argument
2189 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_meta_buffer_update()
2192 cpu_buffer->head_page = bpage; in rb_meta_buffer_update()
2195 cpu_buffer->commit_page = bpage; in rb_meta_buffer_update()
2196 cpu_buffer->tail_page = bpage; in rb_meta_buffer_update()
2200 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages() argument
2203 struct trace_buffer *buffer = cpu_buffer->buffer; in __rb_allocate_pages()
2241 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2247 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
2251 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
2261 bpage->page = rb_range_buffer(cpu_buffer, i + 1); in __rb_allocate_pages()
2266 rb_meta_buffer_update(cpu_buffer, bpage); in __rb_allocate_pages()
2270 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), in __rb_allocate_pages()
2272 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
2278 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2299 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
2306 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
2314 cpu_buffer->pages = pages.next; in rb_allocate_pages()
2317 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
2319 rb_check_pages(cpu_buffer); in rb_allocate_pages()
2327 struct ring_buffer_per_cpu *cpu_buffer __free(kfree) = NULL; in rb_allocate_cpu_buffer()
2333 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
2335 if (!cpu_buffer) in rb_allocate_cpu_buffer()
2338 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
2339 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
2340 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
2341 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
2342 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
2343 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
2344 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
2345 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
2346 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
2347 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
2348 mutex_init(&cpu_buffer->mapping_lock); in rb_allocate_cpu_buffer()
2355 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2357 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
2364 cpu_buffer->mapped = 1; in rb_allocate_cpu_buffer()
2365 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2366 bpage->page = rb_range_buffer(cpu_buffer, 0); in rb_allocate_cpu_buffer()
2369 if (cpu_buffer->ring_meta->head_buffer) in rb_allocate_cpu_buffer()
2370 rb_meta_buffer_update(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2375 cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
2382 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
2383 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
2385 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
2389 rb_meta_validate_events(cpu_buffer); in rb_allocate_cpu_buffer()
2392 meta = cpu_buffer->ring_meta; in rb_allocate_cpu_buffer()
2394 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) { in rb_allocate_cpu_buffer()
2396 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) { in rb_allocate_cpu_buffer()
2398 if (!cpu_buffer->head_page) in rb_allocate_cpu_buffer()
2400 if (!cpu_buffer->commit_page) in rb_allocate_cpu_buffer()
2402 if (!cpu_buffer->tail_page) in rb_allocate_cpu_buffer()
2406 cpu_buffer->head_page in rb_allocate_cpu_buffer()
2407 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
2408 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
2410 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
2412 if (cpu_buffer->ring_meta) in rb_allocate_cpu_buffer()
2416 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
2419 return_ptr(cpu_buffer); in rb_allocate_cpu_buffer()
2422 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
2427 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
2429 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
2432 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
2434 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
2437 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
2447 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
2449 kfree(cpu_buffer); in rb_free_cpu_buffer()
2699 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
2710 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2711 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
2721 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
2727 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
2740 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
2755 cpu_buffer->pages = next_page; in rb_remove_pages()
2756 cpu_buffer->cnt++; in rb_remove_pages()
2760 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2764 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2765 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2767 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2789 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2790 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2791 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2803 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
2809 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
2811 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2817 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2838 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); in rb_insert_pages()
2863 cpu_buffer->cnt++; in rb_insert_pages()
2875 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
2876 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2881 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2890 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2894 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2895 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2897 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2898 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2901 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
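The rb_update_pages() fragments above only show the signed nr_pages_to_update bookkeeping: a positive delta inserts pages, a negative delta removes that many pages, and the delta is then folded into nr_pages. A stand-alone sketch of that shape, assuming hypothetical helpers rather than the kernel's API, could look like this:

    /* Illustrative sketch only: hypothetical names, not kernel code. */
    #include <stdio.h>

    static void insert_pages(long count) { printf("insert %ld pages\n", count); }
    static void remove_pages(long count) { printf("remove %ld pages\n", count); }

    /* Apply a signed resize delta, mirroring the shape of the fragments above. */
    static void apply_resize_delta(long *nr_pages, long nr_pages_to_update)
    {
        if (nr_pages_to_update > 0)
            insert_pages(nr_pages_to_update);
        else if (nr_pages_to_update < 0)
            remove_pages(-nr_pages_to_update);

        *nr_pages += nr_pages_to_update;
    }

    int main(void)
    {
        long nr_pages = 8;

        apply_resize_delta(&nr_pages, +4);    /* grow by four sub-buffers  */
        apply_resize_delta(&nr_pages, -2);    /* shrink by two sub-buffers */
        printf("nr_pages = %ld\n", nr_pages); /* 10 */
        return 0;
    }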
2906 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2908 rb_update_pages(cpu_buffer); in update_pages_handler()
2909 complete(&cpu_buffer->update_done); in update_pages_handler()
2925 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2964 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2972 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2974 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2975 cpu_buffer->nr_pages; in ring_buffer_resize()
2979 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2985 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2986 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2987 &cpu_buffer->new_pages)) { in ring_buffer_resize()
3002 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3003 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
3008 rb_update_pages(cpu_buffer); in ring_buffer_resize()
3009 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3016 &cpu_buffer->update_pages_work); in ring_buffer_resize()
3018 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
3026 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3027 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
3031 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
3032 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3036 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
3038 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
3046 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
3051 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
3052 cpu_buffer->nr_pages; in ring_buffer_resize()
3054 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
3055 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
3056 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
3057 &cpu_buffer->new_pages)) { in ring_buffer_resize()
3064 rb_update_pages(cpu_buffer); in ring_buffer_resize()
3069 rb_update_pages(cpu_buffer); in ring_buffer_resize()
3074 &cpu_buffer->update_pages_work); in ring_buffer_resize()
3075 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
3079 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3101 rb_check_pages(cpu_buffer); in ring_buffer_resize()
3114 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3115 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3117 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
3120 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
3150 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
3152 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
3153 cpu_buffer->reader_page->read); in rb_reader_event()
3222 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
3224 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
3228 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) in rb_event_index() argument
3232 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
3239 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
3247 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
3248 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
3267 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_head() argument
3270 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_head()
3284 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_reader() argument
3287 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_reader()
3288 void *old_reader = cpu_buffer->reader_page->page; in rb_update_meta_reader()
3293 cpu_buffer->reader_page->id = id; in rb_update_meta_reader()
3300 rb_update_meta_head(cpu_buffer, reader); in rb_update_meta_reader()
3311 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
3327 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
3348 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
3349 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
3350 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
3352 if (cpu_buffer->ring_meta) in rb_handle_head_page()
3353 rb_update_meta_head(cpu_buffer, next_page); in rb_handle_head_page()
3383 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
3404 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
3421 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
3438 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
3445 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
3456 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
3459 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
3468 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
3471 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
3535 local_add(bsize - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
3545 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3551 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
3555 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
3556 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
3570 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3594 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
3600 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
3604 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
3622 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
3623 cpu_buffer->tail_page) && in rb_move_tail()
3624 (cpu_buffer->commit_page == in rb_move_tail()
3625 cpu_buffer->reader_page))) { in rb_move_tail()
3626 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3632 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
3636 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
3639 rb_end_commit(cpu_buffer); in rb_move_tail()
3641 local_inc(&cpu_buffer->committing); in rb_move_tail()
3648 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
3655 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_time_stamp() argument
3664 if (abs || rb_event_index(cpu_buffer, event)) { in rb_add_time_stamp()
3684 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
3694 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), in rb_check_timestamp()
3702 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
3734 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
3738 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); in rb_add_timestamp()
3755 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
3761 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
3764 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
3771 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
3815 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
3822 new_index = rb_event_index(cpu_buffer, event); in rb_try_to_discard()
3825 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3827 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3849 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3871 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3880 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
3882 local_inc(&cpu_buffer->committing); in rb_start_commit()
3883 local_inc(&cpu_buffer->commits); in rb_start_commit()
3887 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
3900 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3902 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3903 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3905 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3906 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3912 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3913 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3914 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3915 if (cpu_buffer->ring_meta) { in rb_set_commit_to_write()
3916 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_set_commit_to_write()
3917 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page; in rb_set_commit_to_write()
3922 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
3923 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3927 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3928 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3929 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3930 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3943 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3947 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3951 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3952 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3956 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3959 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3960 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3962 local_dec(&cpu_buffer->committing); in rb_end_commit()
3972 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3973 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3974 local_inc(&cpu_buffer->committing); in rb_end_commit()
3992 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit() argument
3994 local_inc(&cpu_buffer->entries); in rb_commit()
3995 rb_end_commit(cpu_buffer); in rb_commit()
3999 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
4007 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
4008 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
4010 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
4013 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
4016 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
4019 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
4022 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
4024 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
4027 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
4028 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
4030 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
4103 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
4105 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
4110 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
4117 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
4123 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
4124 cpu_buffer->current_context = val; in trace_recursive_lock()
4130 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
4132 cpu_buffer->current_context &= in trace_recursive_unlock()
4133 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
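The trace_recursive_lock()/trace_recursive_unlock() fragments above hint at a per-CPU recursion guard: a context bit in current_context is tested and set on entry and cleared again on exit. The sketch below illustrates that test-set-clear pattern in simplified form; it does not reproduce how the kernel derives the bit from the interrupt context or its exact unlock masking trick, and all names are hypothetical:

    /* Illustrative recursion-guard sketch: not the kernel's implementation. */
    #include <stdbool.h>
    #include <stdio.h>

    struct recursion_ctx {
        unsigned int current_context;   /* one bit per nesting level */
    };

    /* Return false if the bit for this level is already set (recursion). */
    static bool recursive_lock(struct recursion_ctx *ctx, unsigned int bit)
    {
        unsigned int mask = 1u << bit;

        if (ctx->current_context & mask)
            return false;               /* already inside this level */

        ctx->current_context |= mask;
        return true;
    }

    static void recursive_unlock(struct recursion_ctx *ctx, unsigned int bit)
    {
        ctx->current_context &= ~(1u << bit);
    }

    int main(void)
    {
        struct recursion_ctx ctx = { 0 };

        printf("first lock:   %d\n", recursive_lock(&ctx, 0)); /* 1 */
        printf("re-entry:     %d\n", recursive_lock(&ctx, 0)); /* 0 */
        recursive_unlock(&ctx, 0);
        printf("after unlock: %d\n", recursive_lock(&ctx, 0)); /* 1 */
        return 0;
    }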
4154 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
4160 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
4162 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
4174 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
4179 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
4181 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
4195 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
4198 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
4200 rb_commit(cpu_buffer); in ring_buffer_unlock_commit()
4202 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
4204 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
4347 atomic_inc(&cpu_buffer->record_disabled); \
4361 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
4394 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4398 cpu_buffer->cpu, ts, delta); in check_buffer()
4405 cpu_buffer->cpu, in check_buffer()
4414 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
4422 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
4430 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
4434 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4435 rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4437 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4462 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
4472 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
4473 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); in __rb_reserve_next()
4474 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
4479 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
4493 check_buffer(cpu_buffer, info, tail); in __rb_reserve_next()
4499 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4507 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4508 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
4511 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4547 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
4559 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
4566 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
4585 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
4596 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
4597 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
4598 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
4605 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
4608 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
4627 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
4630 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
4641 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
4663 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
4678 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4680 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
4686 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
4689 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
4696 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
4710 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
4714 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
4717 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
4740 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
4765 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
4772 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4779 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
4781 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
4782 rb_try_to_discard(cpu_buffer, event); in ring_buffer_discard_commit()
4783 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
4785 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
4809 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
4825 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
4827 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
4833 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
4836 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
4844 rb_commit(cpu_buffer); in ring_buffer_write()
4846 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
4851 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
4863 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
4865 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4866 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4869 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
4871 return !rb_num_of_entries(cpu_buffer); in rb_per_cpu_empty()
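The rb_num_of_entries() and rb_per_cpu_empty() fragments above spell out the per-CPU accounting identity: entries still readable = entries written - (entries overwritten + entries already read), and the buffer is empty when that count is zero. A tiny self-contained sketch of that identity, with hypothetical names, is:

    /* Illustrative accounting sketch: hypothetical names, not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long readable_entries(unsigned long written,
                                          unsigned long overwritten,
                                          unsigned long consumed)
    {
        return written - (overwritten + consumed);
    }

    static bool per_cpu_empty(unsigned long written,
                              unsigned long overwritten,
                              unsigned long consumed)
    {
        return readable_entries(written, overwritten, consumed) == 0;
    }

    int main(void)
    {
        printf("readable: %lu\n", readable_entries(100, 10, 80)); /* 100 - (10 + 80) = 10 */
        printf("empty:    %d\n",  per_cpu_empty(100, 10, 90));    /* 100 - (10 + 90) = 0 */
        return 0;
    }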
4985 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_is_on_cpu() local
4987 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_is_on_cpu()
4990 !atomic_read(&cpu_buffer->record_disabled); in ring_buffer_record_is_on_cpu()
5005 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
5010 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
5011 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
5025 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
5030 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
5031 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
5043 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
5050 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
5051 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
5056 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
5057 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
5059 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
5062 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
5075 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
5081 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
5082 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
5095 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
5100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
5102 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
5114 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
5120 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
5121 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
5137 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
5143 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
5144 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
5159 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
5165 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
5166 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
5180 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
5185 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
5186 return cpu_buffer->read; in ring_buffer_read_events_cpu()
5199 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
5205 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5206 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
5222 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
5228 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
5229 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
5238 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
5241 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
5242 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
5246 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
5247 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
5250 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
5251 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
5267 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
5273 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
5275 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5277 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5287 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
5296 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
5297 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
5298 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
5299 commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5313 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5325 iter->head == rb_page_size(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
5330 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
5341 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
5346 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
5347 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
5351 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
5355 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
5385 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
5390 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
5393 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
5400 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
5409 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
5414 reader = cpu_buffer->reader_page; in rb_get_reader_page()
5417 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
5421 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
5422 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
5427 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
5431 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
5437 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
5438 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
5439 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
5445 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
5448 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
5449 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
5456 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
5459 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
5471 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
5484 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
5492 if (cpu_buffer->ring_meta) in rb_get_reader_page()
5493 rb_update_meta_reader(cpu_buffer, reader); in rb_get_reader_page()
5500 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
5501 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
5503 cpu_buffer->cnt++; in rb_get_reader_page()
5504 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
5507 cpu_buffer->reader_page = reader; in rb_get_reader_page()
5508 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
5510 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
5511 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
5512 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
5520 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
5522 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
5542 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) in rb_get_reader_page()
5560 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
5566 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
5569 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
5572 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
5575 cpu_buffer->read++; in rb_advance_reader()
5577 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
5580 cpu_buffer->reader_page->read += length; in rb_advance_reader()
5581 cpu_buffer->read_bytes += length; in rb_advance_reader()
5586 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
5588 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
5604 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
5613 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
5615 return cpu_buffer->lost_events; in rb_lost_events()
5619 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
5635 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
5638 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
5642 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
5647 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
5660 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
5667 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5668 cpu_buffer->cpu, ts); in rb_buffer_peek()
5671 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
5676 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
5677 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5678 cpu_buffer->cpu, ts); in rb_buffer_peek()
5681 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
5685 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
5696 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
5703 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
5704 buffer = cpu_buffer->buffer; in rb_iter_peek()
5711 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
5712 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
5713 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
5730 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
5760 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
5761 cpu_buffer->cpu, ts); in rb_iter_peek()
5771 cpu_buffer->cpu, ts); in rb_iter_peek()
5776 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
5783 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
5786 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
5799 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
5803 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
5808 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
5811 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
5828 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
5838 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
5839 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
5841 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
5842 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
5876 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
5881 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5883 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5906 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
5918 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5920 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
5922 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
5924 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5925 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
5928 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
5957 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5975 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_start()
5977 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_start()
5979 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_start()
5981 guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock); in ring_buffer_read_start()
5982 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5984 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5999 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
6002 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
6004 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
6019 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
6022 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
6026 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
6085 static int rb_page_id(struct ring_buffer_per_cpu *cpu_buffer, in rb_page_id() argument
6092 if (cpu_buffer->ring_meta) in rb_page_id()
6093 id = rb_meta_subbuf_idx(cpu_buffer->ring_meta, bpage->page); in rb_page_id()
6100 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_meta_page() argument
6102 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_update_meta_page()
6107 meta->reader.read = cpu_buffer->reader_page->read; in rb_update_meta_page()
6108 meta->reader.id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, in rb_update_meta_page()
6109 cpu_buffer->reader_page->id); in rb_update_meta_page()
6111 meta->reader.lost_events = cpu_buffer->lost_events; in rb_update_meta_page()
6113 meta->entries = local_read(&cpu_buffer->entries); in rb_update_meta_page()
6114 meta->overrun = local_read(&cpu_buffer->overrun); in rb_update_meta_page()
6115 meta->read = cpu_buffer->read; in rb_update_meta_page()
6118 flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE); in rb_update_meta_page()
6122 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
6126 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
6128 cpu_buffer->head_page in rb_reset_cpu()
6129 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
6130 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
6131 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
6135 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
6136 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
6138 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
6139 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
6140 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
6142 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
6143 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
6144 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
6145 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
6146 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
6147 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
6148 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
6149 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
6150 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
6151 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
6152 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
6153 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
6154 cpu_buffer->read = 0; in rb_reset_cpu()
6155 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
6157 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
6158 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
6160 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
6162 cpu_buffer->lost_events = 0; in rb_reset_cpu()
6163 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
6165 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
6166 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
6168 if (cpu_buffer->mapped) { in rb_reset_cpu()
6169 rb_update_meta_page(cpu_buffer); in rb_reset_cpu()
6170 if (cpu_buffer->ring_meta) { in rb_reset_cpu()
6171 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_reset_cpu()
6178 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
6180 guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock); in reset_disabled_cpu_buffer()
6182 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
6185 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6187 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
6189 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6199 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
6207 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6208 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6213 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
6215 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6216 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6231 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
6238 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6240 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6241 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6248 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6254 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
6257 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
6259 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6260 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6272 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
6279 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6281 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6282 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
6289 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6291 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
6293 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
6294 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6307 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
6315 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6317 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
6318 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
6319 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
6337 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
6345 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6347 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
6348 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
6349 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
6464 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
6477 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6479 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6481 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
6482 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
6483 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
6486 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6494 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6520 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
6528 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6539 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6541 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
6542 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
6546 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6593 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
6624 guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock); in ring_buffer_read_page()
6626 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
6630 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
6636 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
6646 cpu_buffer->reader_page == cpu_buffer->commit_page || in ring_buffer_read_page()
6647 cpu_buffer->mapped) { in ring_buffer_read_page()
6648 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
6661 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
6674 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
6689 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
6696 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
6709 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
6710 cpu_buffer->read_bytes += rb_page_size(reader); in ring_buffer_read_page()
6730 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
6822 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_subbuf_order_set() local
6863 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6865 if (cpu_buffer->mapped) { in ring_buffer_subbuf_order_set()
6878 cpu_buffer->nr_pages_to_update = nr_pages; in ring_buffer_subbuf_order_set()
6884 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6885 if (__rb_allocate_pages(cpu_buffer, nr_pages, in ring_buffer_subbuf_order_set()
6886 &cpu_buffer->new_pages)) { in ring_buffer_subbuf_order_set()
6901 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6903 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6906 rb_head_page_deactivate(cpu_buffer); in ring_buffer_subbuf_order_set()
6915 list_add(&old_pages, cpu_buffer->pages); in ring_buffer_subbuf_order_set()
6916 list_add(&cpu_buffer->reader_page->list, &old_pages); in ring_buffer_subbuf_order_set()
6919 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, in ring_buffer_subbuf_order_set()
6921 list_del_init(&cpu_buffer->reader_page->list); in ring_buffer_subbuf_order_set()
6924 cpu_buffer->pages = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
6925 list_del_init(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6926 cpu_buffer->cnt++; in ring_buffer_subbuf_order_set()
6928 cpu_buffer->head_page in ring_buffer_subbuf_order_set()
6929 = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
6930 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in ring_buffer_subbuf_order_set()
6932 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; in ring_buffer_subbuf_order_set()
6933 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_subbuf_order_set()
6935 old_free_data_page = cpu_buffer->free_page; in ring_buffer_subbuf_order_set()
6936 cpu_buffer->free_page = NULL; in ring_buffer_subbuf_order_set()
6938 rb_head_page_activate(cpu_buffer); in ring_buffer_subbuf_order_set()
6940 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6949 rb_check_pages(cpu_buffer); in ring_buffer_subbuf_order_set()
6963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6965 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_subbuf_order_set()
6968 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
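
The ring_buffer_subbuf_order_set() lines show the page-swap dance: allocate new_pages at the new order per CPU, splice them in under reader_lock while retiring the old list and cached free_page, then rebuild the head/tail/commit pointers. From a caller's point of view it is a single call; a hedged sketch with assumed prototypes (the -EBUSY note matches the cpu_buffer->mapped check at source line 6865 above):

	#include <linux/ring_buffer.h>
	#include <linux/printk.h>

	/* Hypothetical: switch the buffer to 2-page (order-1) sub-buffers. */
	static int use_order1_subbufs(struct trace_buffer *buffer)
	{
		int ret;

		ret = ring_buffer_subbuf_order_set(buffer, 1);
		if (ret)	/* e.g. -EBUSY while a per-CPU buffer is user-mapped */
			return ret;

		pr_info("subbuf order %d, %d data bytes per sub-buffer\n",
			ring_buffer_subbuf_order_get(buffer),
			ring_buffer_subbuf_size_get(buffer));
		return 0;
	}
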
6978 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_alloc_meta_page() argument
6982 if (cpu_buffer->meta_page) in rb_alloc_meta_page()
6989 cpu_buffer->meta_page = page_to_virt(page); in rb_alloc_meta_page()
6994 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_meta_page() argument
6996 unsigned long addr = (unsigned long)cpu_buffer->meta_page; in rb_free_meta_page()
6999 cpu_buffer->meta_page = NULL; in rb_free_meta_page()
7002 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_setup_ids_meta_page() argument
7005 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_setup_ids_meta_page()
7006 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1; in rb_setup_ids_meta_page()
7011 id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, id); in rb_setup_ids_meta_page()
7012 subbuf_ids[id++] = (unsigned long)cpu_buffer->reader_page->page; in rb_setup_ids_meta_page()
7015 first_subbuf = subbuf = rb_set_head_page(cpu_buffer); in rb_setup_ids_meta_page()
7017 id = rb_page_id(cpu_buffer, subbuf, id); in rb_setup_ids_meta_page()
7032 cpu_buffer->subbuf_ids = subbuf_ids; in rb_setup_ids_meta_page()
7036 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_setup_ids_meta_page()
7039 rb_update_meta_page(cpu_buffer); in rb_setup_ids_meta_page()
7045 struct ring_buffer_per_cpu *cpu_buffer; in rb_get_mapped_buffer() local
7050 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
7052 mutex_lock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
7054 if (!cpu_buffer->user_mapped) { in rb_get_mapped_buffer()
7055 mutex_unlock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
7059 return cpu_buffer; in rb_get_mapped_buffer()
7062 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_put_mapped_buffer() argument
7064 mutex_unlock(&cpu_buffer->mapping_lock); in rb_put_mapped_buffer()
7071 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, in __rb_inc_dec_mapped() argument
7076 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_inc_dec_mapped()
7079 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped)) in __rb_inc_dec_mapped()
7082 if (inc && cpu_buffer->mapped == UINT_MAX) in __rb_inc_dec_mapped()
7085 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0)) in __rb_inc_dec_mapped()
7088 mutex_lock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7089 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
7092 cpu_buffer->user_mapped++; in __rb_inc_dec_mapped()
7093 cpu_buffer->mapped++; in __rb_inc_dec_mapped()
7095 cpu_buffer->user_mapped--; in __rb_inc_dec_mapped()
7096 cpu_buffer->mapped--; in __rb_inc_dec_mapped()
7099 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
7100 mutex_unlock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7117 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma() argument
7131 subbuf_order = cpu_buffer->buffer->subbuf_order; in __rb_map_vma()
7144 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_map_vma()
7146 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */ in __rb_map_vma()
7166 pages[p++] = virt_to_page(cpu_buffer->meta_page); in __rb_map_vma()
7193 page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]); in __rb_map_vma()
7209 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma() argument
7219 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map() local
7226 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7228 guard(mutex)(&cpu_buffer->mapping_lock); in ring_buffer_map()
7230 if (cpu_buffer->user_mapped) { in ring_buffer_map()
7231 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
7233 err = __rb_inc_dec_mapped(cpu_buffer, true); in ring_buffer_map()
7240 err = rb_alloc_meta_page(cpu_buffer); in ring_buffer_map()
7245 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL); in ring_buffer_map()
7247 rb_free_meta_page(cpu_buffer); in ring_buffer_map()
7251 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_map()
7257 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7258 rb_setup_ids_meta_page(cpu_buffer, subbuf_ids); in ring_buffer_map()
7260 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7262 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
7264 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7266 cpu_buffer->mapped++; in ring_buffer_map()
7267 cpu_buffer->user_mapped = 1; in ring_buffer_map()
7268 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7270 kfree(cpu_buffer->subbuf_ids); in ring_buffer_map()
7271 cpu_buffer->subbuf_ids = NULL; in ring_buffer_map()
7272 rb_free_meta_page(cpu_buffer); in ring_buffer_map()
7273 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_map()
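
ring_buffer_map() distinguishes the first mapping (allocate the meta page and subbuf_ids, then __rb_map_vma()) from later ones, which only go through __rb_inc_dec_mapped(). A sketch of the kernel-side glue, loosely modelled on the trace_pipe_raw mmap handler in kernel/trace/trace.c; struct my_cpu_file and my_trace_mmap() are hypothetical names, and the matching ring_buffer_unmap() would sit in the corresponding teardown path:

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/ring_buffer.h>

	struct my_cpu_file {		/* hypothetical per-open state */
		struct trace_buffer	*buffer;
		int			cpu;
	};

	static int my_trace_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct my_cpu_file *f = filp->private_data;

		/*
		 * Installs the meta page plus all sub-buffers into the VMA and
		 * bumps user_mapped/mapped under reader_lock, as the listing
		 * above shows.
		 */
		return ring_buffer_map(f->buffer, f->cpu, vma);
	}
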
7281 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unmap() local
7287 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7289 guard(mutex)(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7291 if (!cpu_buffer->user_mapped) { in ring_buffer_unmap()
7293 } else if (cpu_buffer->user_mapped > 1) { in ring_buffer_unmap()
7294 __rb_inc_dec_mapped(cpu_buffer, false); in ring_buffer_unmap()
7299 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7302 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped)) in ring_buffer_unmap()
7303 cpu_buffer->mapped--; in ring_buffer_unmap()
7304 cpu_buffer->user_mapped = 0; in ring_buffer_unmap()
7306 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7308 kfree(cpu_buffer->subbuf_ids); in ring_buffer_unmap()
7309 cpu_buffer->subbuf_ids = NULL; in ring_buffer_unmap()
7310 rb_free_meta_page(cpu_buffer); in ring_buffer_unmap()
7311 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_unmap()
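
On the user side, ring_buffer_map() and ring_buffer_map_get_reader() (whose lines follow) back the mmap()+ioctl() interface of trace_pipe_raw. A hedged user-space sketch modelled on Documentation/trace/ring-buffer-map.rst, with trace_buffer_meta field names assumed from the UAPI header and error handling omitted for brevity:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/trace_mmap.h>

	int main(void)
	{
		int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
			      O_RDONLY | O_NONBLOCK);
		long psz = sysconf(_SC_PAGESIZE);

		/*
		 * First mapping: struct trace_buffer_meta, kept current by
		 * rb_update_meta_page() on the kernel side.
		 */
		struct trace_buffer_meta *meta =
			mmap(NULL, psz, PROT_READ, MAP_SHARED, fd, 0);

		/* Then the sub-buffers, laid out by rb_setup_ids_meta_page(). */
		size_t data_len = (size_t)meta->subbuf_size * meta->nr_subbufs;
		void *data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd,
				  meta->meta_page_size);

		/*
		 * Ask the kernel to swap in a fresh reader sub-buffer; this is
		 * serviced by ring_buffer_map_get_reader() in the listing below.
		 */
		ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);

		printf("reader sub-buffer id=%u, read=%u\n",
		       meta->reader.id, meta->reader.read);

		munmap(data, data_len);
		munmap(meta, psz);
		close(fd);
		return 0;
	}
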
7318 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_get_reader() local
7324 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); in ring_buffer_map_get_reader()
7325 if (IS_ERR(cpu_buffer)) in ring_buffer_map_get_reader()
7326 return (int)PTR_ERR(cpu_buffer); in ring_buffer_map_get_reader()
7328 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7331 if (rb_per_cpu_empty(cpu_buffer)) in ring_buffer_map_get_reader()
7334 reader_size = rb_page_size(cpu_buffer->reader_page); in ring_buffer_map_get_reader()
7341 if (cpu_buffer->reader_page->read < reader_size) { in ring_buffer_map_get_reader()
7342 while (cpu_buffer->reader_page->read < reader_size) in ring_buffer_map_get_reader()
7343 rb_advance_reader(cpu_buffer); in ring_buffer_map_get_reader()
7347 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_map_get_reader()
7352 missed_events = cpu_buffer->lost_events; in ring_buffer_map_get_reader()
7355 if (cpu_buffer->reader_page != cpu_buffer->commit_page) { in ring_buffer_map_get_reader()
7376 } else if (!WARN_ONCE(cpu_buffer->reader_page == cpu_buffer->tail_page, in ring_buffer_map_get_reader()
7391 cpu, missed_events, cpu_buffer->reader_page->page->time_stamp); in ring_buffer_map_get_reader()
7395 cpu_buffer->lost_events = 0; in ring_buffer_map_get_reader()
7401 flush_kernel_vmap_range(cpu_buffer->reader_page->page, in ring_buffer_map_get_reader()
7404 rb_update_meta_page(cpu_buffer); in ring_buffer_map_get_reader()
7406 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7407 rb_put_mapped_buffer(cpu_buffer); in ring_buffer_map_get_reader()