Lines matching defs:rb — uses of struct bpf_ringbuf *rb in the BPF ring buffer implementation (kernel/bpf/ringbuf.c)

84 struct bpf_ringbuf *rb;
101 struct bpf_ringbuf *rb;
138 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
140 if (rb) {
142 rb->pages = pages;
143 rb->nr_pages = nr_pages;
144 return rb;
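
The fragment at lines 138-144 is from bpf_ringbuf_area_alloc(): the data pages are handed to vmap() twice (nr_meta_pages + 2 * nr_data_pages), so every data page appears at two consecutive virtual addresses and a record that wraps past the end of the ring is still virtually contiguous. A minimal userspace sketch of the same double-mapping trick, assuming fd is a memfd/shm object already sized to size bytes (ring_double_map() is a hypothetical name, not kernel code):

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

/* Map one buffer twice, back to back, so ring offsets never split a
 * record at the wrap point. Userspace analogue of the kernel's vmap()
 * double mapping. */
static void *ring_double_map(int fd, size_t size)
{
	void *area, *p;

	/* reserve a contiguous 2*size region of address space */
	area = mmap(NULL, 2 * size, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return NULL;

	/* first mapping of the buffer */
	p = mmap(area, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);
	if (p == MAP_FAILED)
		goto err;

	/* second mapping of the same pages, immediately after */
	p = mmap((char *)area + size, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);
	if (p == MAP_FAILED)
		goto err;

	return area;
err:
	munmap(area, 2 * size);
	return NULL;
}

With this layout a producer can copy a record to (pos & mask) in one memcpy(), with no wraparound handling.
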
156 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);
158 wake_up_all(&rb->waitq);
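
bpf_ringbuf_notify() (lines 156-158) is the irq_work callback that wakes consumers. BPF programs may commit records from NMI or other atomic contexts where wake_up_all() cannot be called directly, so producers queue the irq_work and the wakeup runs later from a safe context. Pattern sketch:

/* setup, once, at allocation time (line 183 below): */
init_irq_work(&rb->work, bpf_ringbuf_notify);

/* producer side, after publishing a record; irq_work_queue() is
 * NMI-safe, unlike wake_up_all(): */
irq_work_queue(&rb->work);
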
174 struct bpf_ringbuf *rb;
176 rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
177 if (!rb)
180 raw_res_spin_lock_init(&rb->spinlock);
181 atomic_set(&rb->busy, 0);
182 init_waitqueue_head(&rb->waitq);
183 init_irq_work(&rb->work, bpf_ringbuf_notify);
185 rb->mask = data_sz - 1;
186 rb->consumer_pos = 0;
187 rb->producer_pos = 0;
188 rb->pending_pos = 0;
189 rb->overwrite_mode = overwrite_mode;
191 return rb;
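
Lines 174-191 give the whole initialization order. Reassembled from just the lines above (error handling, header layout, and the exact type of overwrite_mode are not visible in the listing, so this is a sketch, not the verbatim function):

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node,
					     bool overwrite_mode)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return NULL;

	raw_res_spin_lock_init(&rb->spinlock);	/* serializes reservations */
	atomic_set(&rb->busy, 0);		/* user-ringbuf drain exclusion */
	init_waitqueue_head(&rb->waitq);	/* poll()/epoll waiters */
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;			/* data_sz is a power of two */
	rb->consumer_pos = 0;
	rb->producer_pos = 0;
	rb->pending_pos = 0;			/* oldest uncommitted record */
	rb->overwrite_mode = overwrite_mode;

	return rb;
}
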
219 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node, overwrite_mode);
220 if (!rb_map->rb) {
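
ringbuf_map_alloc() (lines 219-220) passes attr->max_entries straight through as the data-area size: for BPF_MAP_TYPE_RINGBUF, max_entries is a byte count and must be a power-of-two multiple of the page size. Userspace creation with libbpf's bpf_map_create() ("events" is an arbitrary name):

#include <bpf/bpf.h>

int create_ringbuf(void)
{
	/* key_size and value_size must be 0 for ringbuf maps;
	 * max_entries is the data area size in bytes */
	return bpf_map_create(BPF_MAP_TYPE_RINGBUF, "events",
			      0, 0, 256 * 1024, NULL);
}
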
228 static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
230 irq_work_sync(&rb->work);
233 * to unmap rb itself with vunmap() below
235 struct page **pages = rb->pages;
236 int i, nr_pages = rb->nr_pages;
238 vunmap(rb);
249 bpf_ringbuf_free(rb_map->rb);
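
Teardown (lines 228-249): the pages pointer and count are copied to locals first because vunmap() destroys the mapping that rb itself lives in. A sketch of the full function (the page-array free at the end follows the kernel's usual helper; details beyond the listed lines are assumptions):

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy out what vunmap() below will invalidate: rb is part of
	 * the vmap area being destroyed */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	irq_work_sync(&rb->work);	/* no wakeup may still be in flight */
	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
}
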
287 return remap_vmalloc_range(vma, rb_map->rb,
306 return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
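
The two remap_vmalloc_range() calls (lines 287 and 306) implement mmap() of the map fd; vm_pgoff is biased by RINGBUF_PGOFF so the non-mmapable head of struct bpf_ringbuf is never exposed to userspace, only the consumer/producer position pages and the data area. Consumers normally don't touch this mapping by hand; libbpf wraps the mmap() and epoll plumbing. A typical consumer loop (handle_event is an illustrative name):

#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	/* one committed record: [data, data + size) */
	return 0;
}

int consume(int map_fd)
{
	struct ring_buffer *rbuf;
	int err = 0;

	rbuf = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rbuf)
		return -1;

	while (err >= 0)
		err = ring_buffer__poll(rbuf, 100 /* timeout, ms */);

	ring_buffer__free(rbuf);
	return err;
}
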
315 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
319 cons_pos = smp_load_acquire(&rb->consumer_pos);
321 if (unlikely(rb->overwrite_mode)) {
322 over_pos = smp_load_acquire(&rb->overwrite_pos);
323 prod_pos = smp_load_acquire(&rb->producer_pos);
326 prod_pos = smp_load_acquire(&rb->producer_pos);
331 static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
333 return rb->mask + 1;
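
Positions are monotonically increasing logical offsets, never wrapped, so "available data" is a plain subtraction (lines 315-333). The overwrite-mode branch also reads overwrite_pos, since an overrun consumer must resume from there; the exact combining expression is not visible in the listing, so the marked line below is an assumption:

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos, over_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	if (unlikely(rb->overwrite_mode)) {
		over_pos = smp_load_acquire(&rb->overwrite_pos);
		prod_pos = smp_load_acquire(&rb->producer_pos);
		/* ASSUMPTION: readable data starts at whichever of
		 * cons_pos/over_pos is newer */
		return prod_pos - max(cons_pos, over_pos);
	}
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
{
	return rb->mask + 1;	/* data area size, a power of two */
}
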
342 poll_wait(filp, &rb_map->rb->waitq, pts);
344 if (ringbuf_avail_data_sz(rb_map->rb))
355 poll_wait(filp, &rb_map->rb->waitq, pts);
357 if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb))
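
The two poll callbacks (lines 342-357) are mirror images: the kernel-producer ring becomes readable (EPOLLIN) as soon as any data is available, while the user-producer ring becomes writable (EPOLLOUT) as long as it is not completely full. Sketch of the kernel-producer side:

static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

The user-ringbuf variant at lines 355-357 returns EPOLLOUT | EPOLLWRNORM while avail < total.
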
364 struct bpf_ringbuf *rb;
369 rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
370 usage += (u64)rb->nr_pages << PAGE_SHIFT;
413 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
416 return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
431 static bool bpf_ringbuf_has_space(const struct bpf_ringbuf *rb,
440 if (new_prod_pos - pend_pos > rb->mask)
444 if (unlikely(rb->overwrite_mode))
451 if (new_prod_pos - cons_pos > rb->mask)
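
bpf_ringbuf_has_space() (lines 431-451) is the admission check for a new reservation. Because positions are unwrapped u64 offsets, "distance greater than mask" means the write would lap the ring. Sketch reassembled from the listed lines:

static bool bpf_ringbuf_has_space(const struct bpf_ringbuf *rb,
				  u64 new_prod_pos, u64 cons_pos,
				  u64 pend_pos)
{
	/* never wrap over the oldest reserved-but-uncommitted record;
	 * even overwrite mode cannot reclaim a record still being built */
	if (new_prod_pos - pend_pos > rb->mask)
		return false;

	/* overwrite mode may clobber committed-but-unconsumed data */
	if (unlikely(rb->overwrite_mode))
		return true;

	/* default mode: never wrap over unconsumed data */
	if (new_prod_pos - cons_pos > rb->mask)
		return false;

	return true;
}
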
463 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
473 if (len > ringbuf_total_data_sz(rb))
476 cons_pos = smp_load_acquire(&rb->consumer_pos);
478 if (raw_res_spin_lock_irqsave(&rb->spinlock, flags))
481 pend_pos = rb->pending_pos;
482 prod_pos = rb->producer_pos;
486 hdr = (void *)rb->data + (pend_pos & rb->mask);
492 rb->pending_pos = pend_pos;
494 if (!bpf_ringbuf_has_space(rb, new_prod_pos, cons_pos, pend_pos)) {
495 raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
504 if (unlikely(rb->overwrite_mode)) {
505 over_pos = rb->overwrite_pos;
506 while (new_prod_pos - over_pos > rb->mask) {
507 hdr = (void *)rb->data + (over_pos & rb->mask);
517 * smp_store_release(&rb->producer_pos, new_prod_pos) at
519 * the updated rb->producer_pos, it always sees the updated
520 * rb->overwrite_pos, so when consumer reads overwrite_pos
524 WRITE_ONCE(rb->overwrite_pos, over_pos);
527 hdr = (void *)rb->data + (prod_pos & rb->mask);
528 pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
533 smp_store_release(&rb->producer_pos, new_prod_pos);
535 raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
548 return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
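
__bpf_ringbuf_reserve() (lines 463-535) is the producer fast path: take the resilient spinlock (raw_res_spin_lock_irqsave() can fail, line 478, in which case the reservation returns NULL), advance pending_pos past already-committed records, check space, in overwrite mode push overwrite_pos past records about to be clobbered, write a BUSY header, and publish producer_pos with a release store. From a BPF program this whole protocol sits behind two helpers; a minimal sketch ("events" and struct event are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);	/* power-of-two page multiple */
} events SEC(".maps");

SEC("tp/sched/sched_switch")
int on_switch(void *ctx)
{
	struct event *e;

	/* maps to __bpf_ringbuf_reserve(); NULL means no space (or the
	 * spinlock could not be taken) */
	e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e->comm, sizeof(e->comm));
	bpf_ringbuf_submit(e, 0);	/* commit: clears the BUSY bit */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
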
563 struct bpf_ringbuf *rb;
567 rb = bpf_ringbuf_restore_from_rec(hdr);
578 rec_pos = (void *)hdr - (void *)rb->data;
579 cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;
582 irq_work_queue(&rb->work);
584 irq_work_queue(&rb->work);
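
Commit (lines 563-584): the record header's length word carries a BUSY bit while reserved; committing clears it with xchg() (and sets DISCARD if the record is dropped), then queues a wakeup either unconditionally (BPF_RB_FORCE_WAKEUP) or when the consumer appears to be caught up to this very record, unless BPF_RB_NO_WAKEUP suppresses it. Sketch reassembled around the listed lines:

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* publish the final header; pairs with the consumer's acquire load */
	xchg(&hdr->len, new_len);

	/* wake the consumer only if it is waiting on exactly this record */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}
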
623 rec = __bpf_ringbuf_reserve(rb_map->rb, size);
643 struct bpf_ringbuf *rb;
645 rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
649 return ringbuf_avail_data_sz(rb);
651 return ringbuf_total_data_sz(rb);
653 return smp_load_acquire(&rb->consumer_pos);
655 return smp_load_acquire(&rb->producer_pos);
657 return smp_load_acquire(&rb->overwrite_pos);
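
Lines 643-657 back the bpf_ringbuf_query() helper, which exposes the ring's counters to BPF programs. Sketch (the first four cases are the UAPI flags BPF_RB_AVAIL_DATA, BPF_RB_RING_SIZE, BPF_RB_CONS_POS and BPF_RB_PROD_POS; the flag name for the overwrite_pos case at line 657 is not visible in the listing):

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return ringbuf_total_data_sz(rb);
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	/* the overwrite-mode case returning rb->overwrite_pos goes here */
	default:
		return 0;
	}
}
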
690 sample = __bpf_ringbuf_reserve(rb_map->rb, size);
748 static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
755 prod_pos = smp_load_acquire(&rb->producer_pos);
760 cons_pos = smp_load_acquire(&rb->consumer_pos);
764 hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
776 if (total_len > ringbuf_total_data_sz(rb))
790 smp_store_release(&rb->consumer_pos, cons_pos + total_len);
797 *sample = (void *)((uintptr_t)rb->data +
798 (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
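
__bpf_user_ringbuf_peek() (lines 748-798) is the kernel-consumer side of the user ring buffer: userspace is the producer, so every header read must tolerate a concurrent, possibly malicious writer. Sketch of the flow (user_ringbuf_peek_sketch is a hypothetical name; validation of producer_pos alignment and of total_len against the advertised region is abbreviated):

static int user_ringbuf_peek_sketch(struct bpf_ringbuf *rb,
				    void **sample, u32 *size)
{
	u32 hdr_len, sample_len, total_len, flags, *hdr;
	u64 cons_pos, prod_pos;

	/* pairs with smp_store_release() in the userspace producer */
	prod_pos = smp_load_acquire(&rb->producer_pos);
	cons_pos = smp_load_acquire(&rb->consumer_pos);
	if (cons_pos >= prod_pos)
		return -ENODATA;

	hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
	hdr_len = smp_load_acquire(hdr);
	flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
	sample_len = hdr_len & ~flags;
	total_len = round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8);

	/* userspace controls the header, so never trust the length */
	if (total_len > ringbuf_total_data_sz(rb))
		return -E2BIG;

	if (flags & BPF_RINGBUF_DISCARD_BIT) {
		/* skip the discarded sample; caller retries the next one */
		smp_store_release(&rb->consumer_pos, cons_pos + total_len);
		return -EAGAIN;
	}

	if (flags & BPF_RINGBUF_BUSY_BIT)
		return -ENODATA;	/* producer hasn't committed yet */

	*sample = (void *)((uintptr_t)rb->data +
			   (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
	*size = sample_len;
	return 0;
}
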
803 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
812 consumer_pos = rb->consumer_pos;
814 smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size);
820 struct bpf_ringbuf *rb;
829 rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
832 if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))
841 err = __bpf_user_ringbuf_peek(rb, &sample, &size);
856 __bpf_user_ringbuf_sample_release(rb, size, flags);
862 * storing of any rb consumer or producer positions.
864 atomic_set_release(&rb->busy, 0);
867 irq_work_queue(&rb->work);
869 irq_work_queue(&rb->work);
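
bpf_user_ringbuf_drain() (lines 820-869) serializes kernel consumers with an atomic_try_cmpxchg() on rb->busy, then alternates peek and release until the ring is empty or the callback stops it, drops busy with a release store (so position updates are visible first), and queues a wakeup for producers blocked on a full ring. End-to-end usage with the real BPF helper and libbpf producer API ("user_events" and handle_sample are illustrative; the two halves live in separate BPF and userspace files):

/* BPF side: drain samples that userspace produced. */
struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 256 * 1024);
} user_events SEC(".maps");

static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
{
	/* read the sample via bpf_dynptr_read()/bpf_dynptr_data() */
	return 0;	/* return 1 to stop draining early */
}

SEC("syscall")
int drain(void *ctx)
{
	return bpf_user_ringbuf_drain(&user_events, handle_sample, NULL, 0);
}

/* Userspace side: reserve, fill, submit. */
#include <bpf/libbpf.h>

int produce(int map_fd)
{
	struct user_ring_buffer *urb = user_ring_buffer__new(map_fd, NULL);
	__u32 *slot;

	if (!urb)
		return -1;

	slot = user_ring_buffer__reserve(urb, sizeof(*slot));
	if (slot) {
		*slot = 42;
		user_ring_buffer__submit(urb, slot);	/* clears the BUSY bit */
	}
	user_ring_buffer__free(urb);
	return 0;
}
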