Lines matching refs: bpage (identifier cross-reference into kernel/trace/ring_buffer.c, the Linux tracing ring buffer; each entry shows the source line number, the line itself, and its enclosing function)
386 static void rb_init_page(struct buffer_data_page *bpage) in rb_init_page() argument
388 local_set(&bpage->commit, 0); in rb_init_page()
391 static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage) in rb_page_commit() argument
393 return local_read(&bpage->page->commit); in rb_page_commit()
396 static void free_buffer_page(struct buffer_page *bpage) in free_buffer_page() argument
399 if (!bpage->range) in free_buffer_page()
400 free_pages((unsigned long)bpage->page, bpage->order); in free_buffer_page()
401 kfree(bpage); in free_buffer_page()
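The guard on ->range above is what lets one free path serve both normally allocated sub-buffers and those carved out of a persistent boot-mapped region: range pages are never handed back to the page allocator, only their descriptor is freed. A minimal user-space sketch of that shape (the model_* names and plain malloc/free are stand-ins, not the kernel API):

    #include <stdlib.h>

    struct model_buffer_page {
        void *page;   /* sub-buffer data */
        int   range;  /* non-zero: data lives in a mapped range */
    };

    static void model_free_buffer_page(struct model_buffer_page *bpage)
    {
        if (!bpage->range)
            free(bpage->page);  /* stands in for free_pages() */
        free(bpage);            /* the descriptor itself always goes */
    }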
1389 static inline void rb_inc_page(struct buffer_page **bpage) in rb_inc_page() argument
1391 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1393 *bpage = list_entry(p, struct buffer_page, list); in rb_inc_page()
1396 static inline void rb_dec_page(struct buffer_page **bpage) in rb_dec_page() argument
1398 struct list_head *p = rb_list_head((*bpage)->list.prev); in rb_dec_page()
1400 *bpage = list_entry(p, struct buffer_page, list); in rb_dec_page()
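rb_inc_page()/rb_dec_page() walk a circular list whose link pointers may carry state flags (the head/update markers) in their low bits, which rb_list_head() strips before the pointer is dereferenced. A hedged sketch of the masking walk, assuming two low flag bits as in mainline; all model_* names are illustrative:

    #include <stdint.h>

    #define MODEL_FLAG_MASK 0x3UL   /* assumed: two low tag bits */

    struct model_list { struct model_list *next, *prev; };
    struct model_page { struct model_list list; int id; };

    /* strip the flag bits a writer may have set in a link pointer */
    static struct model_list *model_list_head(struct model_list *p)
    {
        return (struct model_list *)((uintptr_t)p & ~MODEL_FLAG_MASK);
    }

    /* advance to the next page, as rb_inc_page() does */
    static void model_inc_page(struct model_page **page)
    {
        struct model_list *n = model_list_head((*page)->list.next);

        *page = (struct model_page *)n;  /* list is the first member */
    }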
1518 struct buffer_page *bpage) in rb_check_bpage() argument
1520 unsigned long val = (unsigned long)bpage; in rb_check_bpage()
1981 struct buffer_page *bpage = orig_head; in rb_meta_validate_events() local
1983 rb_dec_page(&bpage); in rb_meta_validate_events()
1992 bpage->list.next = &cpu_buffer->reader_page->list; in rb_meta_validate_events()
1996 bpage = head_page; in rb_meta_validate_events()
1998 head_page->list.prev = bpage->list.prev; in rb_meta_validate_events()
1999 rb_dec_page(&bpage); in rb_meta_validate_events()
2000 bpage->list.next = &head_page->list; in rb_meta_validate_events()
2001 rb_set_list_to_head(&bpage->list); in rb_meta_validate_events()
2008 bpage = cpu_buffer->reader_page; in rb_meta_validate_events()
2009 meta->buffers[0] = rb_meta_subbuf_idx(meta, bpage->page); in rb_meta_validate_events()
2010 bpage->id = 0; in rb_meta_validate_events()
2012 for (i = 1, bpage = head_page; i < meta->nr_subbufs; in rb_meta_validate_events()
2013 i++, rb_inc_page(&bpage)) { in rb_meta_validate_events()
2014 meta->buffers[i] = rb_meta_subbuf_idx(meta, bpage->page); in rb_meta_validate_events()
2015 bpage->id = i; in rb_meta_validate_events()
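The loop above renumbers every sub-buffer once a persistent buffer has been validated: slot 0 of the meta block's buffers[] table is the reader page, and the remaining slots follow the circular order starting at the head page. A toy rendering under those assumptions (model_subbuf_idx() stands in for rb_meta_subbuf_idx(); every name here is hypothetical):

    #include <stddef.h>

    struct model_meta { int nr_subbufs; int buffers[16]; };
    struct model_page { void *data; int id; struct model_page *next; };

    /* slot of a data page inside the mapped range */
    static int model_subbuf_idx(char *range, void *data, size_t subbuf_sz)
    {
        return (int)(((char *)data - range) / subbuf_sz);
    }

    static void model_renumber(struct model_meta *meta, char *range,
                               size_t subbuf_sz, struct model_page *reader,
                               struct model_page *head)
    {
        struct model_page *p = head;

        meta->buffers[0] = model_subbuf_idx(range, reader->data, subbuf_sz);
        reader->id = 0;
        for (int i = 1; i < meta->nr_subbufs; i++, p = p->next) {
            meta->buffers[i] = model_subbuf_idx(range, p->data, subbuf_sz);
            p->id = i;
        }
    }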
2222 struct buffer_page *bpage) in rb_meta_buffer_update() argument
2226 if (meta->head_buffer == (unsigned long)bpage->page) in rb_meta_buffer_update()
2227 cpu_buffer->head_page = bpage; in rb_meta_buffer_update()
2229 if (meta->commit_buffer == (unsigned long)bpage->page) { in rb_meta_buffer_update()
2230 cpu_buffer->commit_page = bpage; in rb_meta_buffer_update()
2231 cpu_buffer->tail_page = bpage; in rb_meta_buffer_update()
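rb_meta_buffer_update() re-derives the head/commit/tail descriptor pointers by matching each descriptor's data page against the raw addresses the meta block preserved across reboot; note that the commit page is also installed as the tail page, since that is where writing resumes. A compact model of the same matching, with illustrative model_* types:

    struct model_page { void *data; };
    struct model_meta { unsigned long head_buffer, commit_buffer; };
    struct model_cpu  { struct model_page *head, *commit, *tail; };

    static void model_meta_buffer_update(struct model_cpu *cb,
                                         struct model_meta *meta,
                                         struct model_page *bpage)
    {
        if (meta->head_buffer == (unsigned long)bpage->data)
            cb->head = bpage;
        if (meta->commit_buffer == (unsigned long)bpage->data)
            cb->commit = cb->tail = bpage;  /* writes resume here */
    }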
2240 struct buffer_page *bpage, *tmp; in __rb_allocate_pages() local
2272 bpage = alloc_cpu_page(cpu_buffer->cpu); in __rb_allocate_pages()
2273 if (!bpage) in __rb_allocate_pages()
2276 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
2282 list_add_tail(&bpage->list, pages); in __rb_allocate_pages()
2286 bpage->page = rb_range_buffer(cpu_buffer, i + 1); in __rb_allocate_pages()
2287 if (!bpage->page) in __rb_allocate_pages()
2291 rb_meta_buffer_update(cpu_buffer, bpage); in __rb_allocate_pages()
2292 bpage->range = 1; in __rb_allocate_pages()
2293 bpage->id = i + 1; in __rb_allocate_pages()
2296 bpage->page = alloc_cpu_data(cpu_buffer->cpu, order); in __rb_allocate_pages()
2297 if (!bpage->page) in __rb_allocate_pages()
2300 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2311 list_for_each_entry_safe(bpage, tmp, pages, list) { in __rb_allocate_pages()
2312 list_del_init(&bpage->list); in __rb_allocate_pages()
2313 free_buffer_page(bpage); in __rb_allocate_pages()
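__rb_allocate_pages() follows the usual kernel unwind idiom: each successfully allocated page is linked onto a local list, and on any failure the partial list is walked with list_for_each_entry_safe(), unlinking and freeing whatever was built so far. A self-contained user-space sketch of the pattern (singly linked for brevity, plain malloc/free standing in for the kernel allocators):

    #include <stdlib.h>

    struct model_page { struct model_page *next; void *data; };

    static int model_alloc_pages(struct model_page **pages, int nr, size_t sz)
    {
        struct model_page *head = NULL;

        for (int i = 0; i < nr; i++) {
            struct model_page *p = calloc(1, sizeof(*p));

            if (p)
                p->data = malloc(sz);
            if (!p || !p->data) {       /* unwind everything built so far */
                free(p);
                while (head) {
                    struct model_page *t = head->next;

                    free(head->data);
                    free(head);
                    head = t;
                }
                return -1;
            }
            p->next = head;
            head = p;
        }
        *pages = head;
        return 0;
    }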
2352 struct buffer_page *bpage; in rb_allocate_cpu_buffer() local
2370 bpage = alloc_cpu_page(cpu); in rb_allocate_cpu_buffer()
2371 if (!bpage) in rb_allocate_cpu_buffer()
2374 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2376 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
2385 bpage->page = rb_range_buffer(cpu_buffer, 0); in rb_allocate_cpu_buffer()
2386 if (!bpage->page) in rb_allocate_cpu_buffer()
2389 rb_meta_buffer_update(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2390 bpage->range = 1; in rb_allocate_cpu_buffer()
2393 bpage->page = alloc_cpu_data(cpu, order); in rb_allocate_cpu_buffer()
2394 if (!bpage->page) in rb_allocate_cpu_buffer()
2446 struct buffer_page *bpage, *tmp; in rb_free_cpu_buffer() local
2455 list_for_each_entry_safe(bpage, tmp, head, list) { in rb_free_cpu_buffer()
2456 list_del_init(&bpage->list); in rb_free_cpu_buffer()
2457 free_buffer_page(bpage); in rb_free_cpu_buffer()
2459 bpage = list_entry(head, struct buffer_page, list); in rb_free_cpu_buffer()
2460 free_buffer_page(bpage); in rb_free_cpu_buffer()
2704 static inline unsigned long rb_page_entries(struct buffer_page *bpage) in rb_page_entries() argument
2706 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
2709 static inline unsigned long rb_page_write(struct buffer_page *bpage) in rb_page_write() argument
2711 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
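Both accessors mask the raw counter because its upper bits are not part of the value: the ring buffer reserves them as a per-page interruption/nesting count, leaving (assuming the mainline layout) the low 20 bits, RB_WRITE_MASK, for the actual byte offset or entry count. Sketch:

    #define MODEL_WRITE_MASK 0xfffffUL  /* assumed: low 20 bits hold the value */

    /* same masking serves both the write offset and the entry count */
    static unsigned long model_page_write(unsigned long raw)
    {
        return raw & MODEL_WRITE_MASK;
    }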
2896 struct buffer_page *bpage, *tmp; in rb_insert_pages() local
2897 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2899 list_del_init(&bpage->list); in rb_insert_pages()
2900 free_buffer_page(bpage); in rb_insert_pages()
3128 struct buffer_page *bpage, *tmp; in ring_buffer_resize() local
3136 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
3138 list_del_init(&bpage->list); in ring_buffer_resize()
3139 free_buffer_page(bpage); in ring_buffer_resize()
3162 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) in __rb_page_index() argument
3164 return bpage->page->data + index; in __rb_page_index()
3234 static __always_inline unsigned rb_page_size(struct buffer_page *bpage) in rb_page_size() argument
3236 return rb_page_commit(bpage) & ~RB_MISSED_MASK; in rb_page_size()
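rb_page_size() likewise strips the two high flag bits that ring_buffer_read_page() may set in ->commit to signal missed events, so only the data length remains. Assuming the mainline bit positions (30 and 31):

    #define MODEL_MISSED_MASK (3UL << 30)  /* assumed flag bits */

    /* usable data length on a sub-buffer: commit minus the flag bits */
    static unsigned long model_page_size(unsigned long commit)
    {
        return commit & ~MODEL_MISSED_MASK;
    }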
3837 struct buffer_page *bpage; in rb_try_to_discard() local
3845 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3851 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
3853 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
3887 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { in rb_try_to_discard()
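rb_try_to_discard() can reclaim an event's space only while that event is still the newest thing on the tail page: it recomputes where the write index stood before the event and tries to roll the index back with a single compare-and-swap. If another writer advanced the page in the meantime, the exchange fails and the caller falls back to padding the event out. A C11-atomics model of the attempt:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool model_try_discard(atomic_ulong *write, unsigned long old_index,
                                  unsigned long new_index)
    {
        /* succeeds only if nothing was written after our event */
        return atomic_compare_exchange_strong(write, &old_index, new_index);
    }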
4304 static void dump_buffer_page(struct buffer_data_page *bpage, in dump_buffer_page() argument
4312 ts = bpage->time_stamp; in dump_buffer_page()
4317 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
4367 dump_buffer_page(bpage, info, tail); \
4383 struct buffer_data_page *bpage; in check_buffer() local
4388 bpage = info->tail_page->page; in check_buffer()
4392 tail = local_read(&bpage->commit); in check_buffer()
4403 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
4412 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4732 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry() local
4738 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
4739 local_dec(&bpage->entries); in rb_decrement_entry()
4747 rb_inc_page(&bpage); in rb_decrement_entry()
4748 start = bpage; in rb_decrement_entry()
4750 if (bpage->page == (void *)addr) { in rb_decrement_entry()
4751 local_dec(&bpage->entries); in rb_decrement_entry()
4754 rb_inc_page(&bpage); in rb_decrement_entry()
4755 } while (bpage != start); in rb_decrement_entry()
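rb_decrement_entry() checks the commit page first (the common case), then walks the circular page list for at most one full lap looking for the page that contains the event's address, bounding the lap by remembering its starting point. A sketch of that bounded search (page_addr is assumed to already be masked down to the sub-buffer start, as the kernel does before this walk):

    struct model_page { struct model_page *next; void *data; long entries; };

    static void model_dec_entry(struct model_page *commit, void *page_addr)
    {
        struct model_page *p, *start;

        if (commit->data == page_addr) {  /* common case: still on commit page */
            commit->entries--;
            return;
        }
        p = start = commit->next;         /* otherwise: at most one full lap */
        do {
            if (p->data == page_addr) {
                p->entries--;
                return;
            }
            p = p->next;
        } while (p != start);
        /* not found: the kernel warns here (RB_WARN_ON) */
    }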
5062 struct buffer_page *bpage; in ring_buffer_oldest_event_ts() local
5075 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
5077 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
5078 if (bpage) in ring_buffer_oldest_event_ts()
5079 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
6104 struct buffer_page *bpage, int id) in rb_page_id() argument
6111 id = rb_meta_subbuf_idx(cpu_buffer->ring_meta, bpage->page); in rb_page_id()
6113 bpage->id = id; in rb_page_id()
6483 struct buffer_data_read_page *bpage = NULL; in ring_buffer_alloc_read_page() local
6489 bpage = kzalloc(sizeof(*bpage), GFP_KERNEL); in ring_buffer_alloc_read_page()
6490 if (!bpage) in ring_buffer_alloc_read_page()
6493 bpage->order = buffer->subbuf_order; in ring_buffer_alloc_read_page()
6499 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
6506 if (bpage->data) { in ring_buffer_alloc_read_page()
6507 rb_init_page(bpage->data); in ring_buffer_alloc_read_page()
6509 bpage->data = alloc_cpu_data(cpu, cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6510 if (!bpage->data) { in ring_buffer_alloc_read_page()
6511 kfree(bpage); in ring_buffer_alloc_read_page()
6516 return bpage; in ring_buffer_alloc_read_page()
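ring_buffer_alloc_read_page() first tries to recycle the one-slot per-CPU cache (cpu_buffer->free_page) that ring_buffer_free_read_page() refills, and only allocates fresh memory when that cache is empty. Sketch of the cache-then-allocate shape, with locking elided (the kernel holds the reader lock around the cache access) and malloc standing in for the page allocator:

    #include <stdlib.h>

    struct model_cpu { void *free_page; };

    static void *model_alloc_read_page(struct model_cpu *cb, size_t sz)
    {
        void *data = cb->free_page;  /* try the one-slot cache first */

        cb->free_page = NULL;
        if (!data)
            data = malloc(sz);       /* stands in for alloc_cpu_data() */
        return data;
    }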
6532 struct buffer_data_page *bpage = data_page->data; in ring_buffer_free_read_page() local
6533 struct page *page = virt_to_page(bpage); in ring_buffer_free_read_page()
6553 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
6554 bpage = NULL; in ring_buffer_free_read_page()
6561 free_pages((unsigned long)bpage, data_page->order); in ring_buffer_free_read_page()
6606 struct buffer_data_page *bpage; in ring_buffer_read_page() local
6631 bpage = data_page->data; in ring_buffer_read_page()
6632 if (!bpage) in ring_buffer_read_page()
6696 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
6713 local_set(&bpage->commit, pos); in ring_buffer_read_page()
6714 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
6724 rb_init_page(bpage); in ring_buffer_read_page()
6725 bpage = reader->page; in ring_buffer_read_page()
6730 data_page->data = bpage; in ring_buffer_read_page()
6738 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
6743 commit = local_read(&bpage->commit); in ring_buffer_read_page()
6752 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
6754 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
6757 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
6764 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); in ring_buffer_read_page()
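The tail of ring_buffer_read_page() encodes loss information in the high bits of ->commit: if events were missed, RB_MISSED_EVENTS is set, and when the count also fits after the last event on the page it is stored there and flagged with RB_MISSED_STORED. A hedged model of that writer side, assuming the same bit positions as in the masking sketch above:

    #include <string.h>

    #define MODEL_MISSED_EVENTS (1UL << 31)  /* assumed bit positions */
    #define MODEL_MISSED_STORED (1UL << 30)

    static void model_store_missed(unsigned char *subbuf, unsigned long *commit,
                                   unsigned long missed, unsigned long subbuf_sz)
    {
        unsigned long pos = *commit &
                            ~(MODEL_MISSED_EVENTS | MODEL_MISSED_STORED);

        if (!missed)
            return;
        if (pos + sizeof(missed) < subbuf_sz) {  /* count fits after the data */
            memcpy(subbuf + pos, &missed, sizeof(missed));
            *commit |= MODEL_MISSED_STORED;
        }
        *commit |= MODEL_MISSED_EVENTS;          /* events were lost either way */
    }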
6834 struct buffer_page *bpage, *tmp; in ring_buffer_subbuf_order_set() local
6954 list_for_each_entry_safe(bpage, tmp, &old_pages, list) { in ring_buffer_subbuf_order_set()
6955 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
6956 free_buffer_page(bpage); in ring_buffer_subbuf_order_set()
6979 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
6980 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
6981 free_buffer_page(bpage); in ring_buffer_subbuf_order_set()
7371 struct buffer_data_page *bpage = reader->page; in ring_buffer_map_get_reader() local
7379 local_set(&bpage->commit, reader->real_end); in ring_buffer_map_get_reader()
7386 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_map_get_reader()
7388 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_map_get_reader()
7390 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_map_get_reader()