Lines matching refs:r: cross-reference search results for the identifier r, the struct mp_ring pointer threaded through every function below. Each entry gives the source line number, the matching line, and the enclosing function.
89 space_available(struct mp_ring *r, union ring_state s) in space_available() argument
91 uint16_t x = r->size - 1; in space_available()
102 increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n) in increment_idx() argument
104 int x = r->size - idx; in increment_idx()
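Both helpers are plain modular arithmetic on a ring that deliberately holds at most size - 1 items, so an empty ring (cidx == pidx_head) stays distinguishable from a full one. The listing shows only the first line of each body; what follows is a hedged userland reconstruction of that arithmetic, with all demo_* names invented here for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the struct mp_ring fields these helpers use. */
struct demo_ring {
        uint16_t size;          /* slot count; usable capacity is size - 1 */
};

/* Free slots between the producers' reservation head and the consumer. */
static int
demo_space_available(const struct demo_ring *r, uint16_t cidx,
    uint16_t pidx_head)
{
        uint16_t x = r->size - 1;

        if (cidx == pidx_head)
                return (x);                     /* empty */
        else if (cidx > pidx_head)
                return (cidx - pidx_head - 1);  /* no wrap in between */
        else
                return (x - pidx_head + cidx);  /* head wrapped past cidx */
}

/* Advance idx by n, wrapping at r->size without a modulo. */
static uint16_t
demo_increment_idx(const struct demo_ring *r, uint16_t idx, uint16_t n)
{
        int x = r->size - idx;  /* distance to the wrap point */

        assert(x > 0 && n < r->size);
        return (x > n ? idx + n : n - x);
}

int
main(void)
{
        struct demo_ring r = { .size = 8 };

        printf("%d\n", demo_space_available(&r, 0, 0)); /* 7 */
        printf("%u\n", demo_increment_idx(&r, 5, 6));   /* (5 + 6) % 8 == 3 */
        return (0);
}

The increment avoids a modulo by relying on idx always being in [0, size): either the step stays below the wrap point or it wraps exactly once.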
115 drain_ring(struct mp_ring *r, int budget) in drain_ring() argument
123 mtx_assert(r->cons_lock, MA_OWNED); in drain_ring()
125 os.state = atomic_load_acq_64(&r->state); in drain_ring()
138 n = r->drain(r, cidx, pidx, &coalescing); in drain_ring()
141 os.state = atomic_load_64(&r->state); in drain_ring()
154 } while (atomic_fcmpset_64(&r->state, &os.state, in drain_ring()
158 counter_u64_add(r->abdications, 1); in drain_ring()
160 counter_u64_add(r->stalls, 1); in drain_ring()
163 cidx = increment_idx(r, cidx, n); in drain_ring()
166 counter_u64_add(r->consumed, n); in drain_ring()
168 os.state = atomic_load_64(&r->state); in drain_ring()
191 } while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0); in drain_ring()
195 counter_u64_add(r->abdications, 1); in drain_ring()
202 counter_u64_add(r->cons_idle2, 1); in drain_ring()
204 counter_u64_add(r->cons_idle, 1); in drain_ring()
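drain_ring is the single-consumer side. With r->cons_lock held it repeatedly passes the [cidx, pidx) range to r->drain, advances cidx with increment_idx, and publishes each step through a compare-and-swap loop on the packed 64-bit state word, accounting along the way for abdications (the consumer gave the ring up with work left), stalls, consumed items, and the two cons_idle cases. Below is a compressed userland sketch of just the publish step, using C11 atomics; the field layout and all demo_* names are assumptions, and plain seq_cst operations stand in for the kernel's atomic_load_acq_64 / atomic_fcmpset_64 variants:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { DEMO_IDLE, DEMO_BUSY, DEMO_STALLED };

/* Packed ring state: every index plus the flags share one 64-bit word,
 * so the consumer can observe and update all of them atomically. */
union demo_state {
        uint64_t word;
        struct {
                uint16_t cidx;          /* consumer index */
                uint16_t pidx_tail;     /* last published producer index */
                uint16_t pidx_head;     /* last reserved producer index */
                uint16_t flags;
        };
};

/* Publish a new cidx after one r->drain() step.  The fcmpset-style loop
 * reloads the expected value on failure, like atomic_fcmpset_64 does. */
static void
demo_publish_cidx(_Atomic uint64_t *state, uint16_t new_cidx)
{
        union demo_state os, ns;

        os.word = atomic_load(state);
        do {
                ns.word = os.word;
                ns.cidx = new_cidx;
                /* Fully drained and no reservation pending: go idle. */
                if (ns.cidx == ns.pidx_tail && ns.pidx_tail == ns.pidx_head)
                        ns.flags = DEMO_IDLE;
        } while (!atomic_compare_exchange_weak(state, &os.word, ns.word));
}

int
main(void)
{
        _Atomic uint64_t state;
        union demo_state s = { .cidx = 0, .pidx_tail = 4, .pidx_head = 4,
            .flags = DEMO_BUSY };

        atomic_store(&state, s.word);
        demo_publish_cidx(&state, 4);   /* consumer drained all 4 items */
        s.word = atomic_load(&state);
        printf("cidx=%u flags=%u\n", (unsigned)s.cidx, (unsigned)s.flags);
        return (0);
}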
227 drain_txpkts(struct mp_ring *r, union ring_state os, int budget) in drain_txpkts() argument
234 mtx_assert(r->cons_lock, MA_OWNED); in drain_txpkts()
238 r->drain(r, cidx, pidx, &coalescing); in drain_txpkts()
241 os.state = atomic_load_64(&r->state); in drain_txpkts()
250 } while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0); in drain_txpkts()
254 drain_ring(r, budget); in drain_txpkts()
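drain_txpkts evidently follows the same discipline as drain_ring above, specialized to leftover coalesced work: one r->drain call to flush it (line 238), an acquire fcmpset loop on r->state (line 250), then a handoff to drain_ring (line 254) once ordinary draining can resume.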
262 struct mp_ring *r; in mp_ring_alloc() local
273 r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO); in mp_ring_alloc()
274 if (r == NULL) in mp_ring_alloc()
276 r->size = size; in mp_ring_alloc()
277 r->cookie = cookie; in mp_ring_alloc()
278 r->mt = mt; in mp_ring_alloc()
279 r->drain = drain; in mp_ring_alloc()
280 r->can_drain = can_drain; in mp_ring_alloc()
281 r->cons_lock = lck; in mp_ring_alloc()
282 if ((r->dropped = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
284 for (i = 0; i < nitems(r->consumer); i++) { in mp_ring_alloc()
285 if ((r->consumer[i] = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
288 if ((r->not_consumer = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
290 if ((r->abdications = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
292 if ((r->stalls = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
294 if ((r->consumed = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
296 if ((r->cons_idle = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
298 if ((r->cons_idle2 = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
300 *pr = r; in mp_ring_alloc()
303 mp_ring_free(r); in mp_ring_alloc()
309 mp_ring_free(struct mp_ring *r) in mp_ring_free() argument
313 if (r == NULL) in mp_ring_free()
316 if (r->dropped != NULL) in mp_ring_free()
317 counter_u64_free(r->dropped); in mp_ring_free()
318 for (i = 0; i < nitems(r->consumer); i++) { in mp_ring_free()
319 if (r->consumer[i] != NULL) in mp_ring_free()
320 counter_u64_free(r->consumer[i]); in mp_ring_free()
322 if (r->not_consumer != NULL) in mp_ring_free()
323 counter_u64_free(r->not_consumer); in mp_ring_free()
324 if (r->abdications != NULL) in mp_ring_free()
325 counter_u64_free(r->abdications); in mp_ring_free()
326 if (r->stalls != NULL) in mp_ring_free()
327 counter_u64_free(r->stalls); in mp_ring_free()
328 if (r->consumed != NULL) in mp_ring_free()
329 counter_u64_free(r->consumed); in mp_ring_free()
330 if (r->cons_idle != NULL) in mp_ring_free()
331 counter_u64_free(r->cons_idle); in mp_ring_free()
332 if (r->cons_idle2 != NULL) in mp_ring_free()
333 counter_u64_free(r->cons_idle2); in mp_ring_free()
335 free(r, r->mt); in mp_ring_free()
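mp_ring_alloc makes one zeroed allocation sized with __offsetof(struct mp_ring, items[size]), so the header and the item array live in a single block, then allocates each counter(9) statistic in turn, unwinding through mp_ring_free on the first failure; mp_ring_free correspondingly NULL-checks every counter so it is safe on partially constructed rings. A userland sketch of that construct/unwind pattern (the demo_* names and the two-counter layout are inventions for illustration):

#include <stdint.h>
#include <stdlib.h>

struct demo_ring {
        uint16_t size;
        uint64_t *dropped;      /* stand-ins for the counter_u64 stats */
        uint64_t *consumed;
        void *items[];          /* flexible array member, cf. items[size] */
};

static void
demo_ring_free(struct demo_ring *r)
{
        if (r == NULL)
                return;
        /* Tolerates partial construction: free(NULL) is a no-op. */
        free(r->dropped);
        free(r->consumed);
        free(r);
}

static int
demo_ring_alloc(struct demo_ring **pr, uint16_t size)
{
        struct demo_ring *r;

        /* Header plus size slots in one zeroed block (cf. M_ZERO).  The
         * kernel computes the size as __offsetof(struct mp_ring,
         * items[size]); sizeof plus array size is the portable analog. */
        r = calloc(1, sizeof(*r) + (size_t)size * sizeof(void *));
        if (r == NULL)
                return (1);
        r->size = size;
        if ((r->dropped = calloc(1, sizeof(*r->dropped))) == NULL)
                goto failed;
        if ((r->consumed = calloc(1, sizeof(*r->consumed))) == NULL)
                goto failed;
        *pr = r;
        return (0);
failed:
        demo_ring_free(r);      /* frees whatever was set up so far */
        return (1);
}

int
main(void)
{
        struct demo_ring *r = NULL;

        if (demo_ring_alloc(&r, 128) == 0)
                demo_ring_free(r);
        return (0);
}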
344 mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget) in mp_ring_enqueue() argument
359 os.state = atomic_load_64(&r->state); in mp_ring_enqueue()
362 if (__predict_true(space_available(r, os) >= n)) in mp_ring_enqueue()
370 counter_u64_add(r->dropped, n); in mp_ring_enqueue()
374 mp_ring_check_drainage(r, 64); in mp_ring_enqueue()
377 os.state = atomic_load_64(&r->state); in mp_ring_enqueue()
384 ns.pidx_head = increment_idx(r, os.pidx_head, n); in mp_ring_enqueue()
389 ns.pidx_tail = increment_idx(r, os.pidx_tail, n); in mp_ring_enqueue()
398 if (atomic_fcmpset_64(&r->state, &os.state, ns.state)) in mp_ring_enqueue()
410 r->items[i] = *items++; in mp_ring_enqueue()
411 if (__predict_false(++i == r->size)) in mp_ring_enqueue()
415 counter_u64_add(r->consumer[C_FAST], 1); in mp_ring_enqueue()
416 mtx_lock(r->cons_lock); in mp_ring_enqueue()
417 drain_ring(r, budget); in mp_ring_enqueue()
418 mtx_unlock(r->cons_lock); in mp_ring_enqueue()
429 ns.state = atomic_load_64(&r->state); in mp_ring_enqueue()
435 r->items[i] = *items++; in mp_ring_enqueue()
436 if (__predict_false(++i == r->size)) in mp_ring_enqueue()
444 os.state = atomic_load_64(&r->state); in mp_ring_enqueue()
450 (os.flags == STALLED && r->can_drain(r))) { in mp_ring_enqueue()
455 } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0); in mp_ring_enqueue()
462 counter_u64_add(r->not_consumer, 1); in mp_ring_enqueue()
466 MPASS(cons > C_FAST && cons < nitems(r->consumer)); in mp_ring_enqueue()
467 counter_u64_add(r->consumer[cons], 1); in mp_ring_enqueue()
468 mtx_lock(r->cons_lock); in mp_ring_enqueue()
469 drain_ring(r, budget); in mp_ring_enqueue()
470 mtx_unlock(r->cons_lock); in mp_ring_enqueue()
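mp_ring_enqueue is the multi-producer side and runs lock-free in three steps: reserve n slots by CAS-advancing pidx_head (dropping the items and bumping r->dropped when space_available says no); copy the items into the now privately owned slots; and publish by advancing pidx_tail, which must happen in reservation order, so a producer waits until pidx_tail reaches the start of its own range. The fast path (C_FAST) is the producer that also finds the ring idle and becomes the consumer itself. Below is a userland sketch of the three steps, omitting the consumer takeover, the stall handling, and the counters; the layout, the demo_* names, and the power-of-two size are assumptions:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SIZE 8     /* power of two; usable capacity DEMO_SIZE - 1 */

union demo_state {
        uint64_t word;
        struct {
                uint16_t cidx, pidx_tail, pidx_head, flags;
        };
};

struct demo_ring {
        _Atomic uint64_t state;
        void *items[DEMO_SIZE];
};

static uint16_t
demo_inc(uint16_t idx, uint16_t n)
{
        int x = DEMO_SIZE - idx;

        return (x > n ? idx + n : n - x);
}

/* Returns 0 on success, nonzero if there is not enough room. */
static int
demo_enqueue(struct demo_ring *r, void **items, uint16_t n)
{
        union demo_state os, ns;
        uint16_t i, start, end;

        /* Step 1: reserve n slots by moving pidx_head. */
        os.word = atomic_load(&r->state);
        do {
                /* Mod-size trick is valid because DEMO_SIZE is 2^k. */
                uint16_t space = (uint16_t)(os.cidx - os.pidx_head - 1) %
                    DEMO_SIZE;

                if (space < n)
                        return (1);     /* caller would count r->dropped */
                ns.word = os.word;
                ns.pidx_head = demo_inc(os.pidx_head, n);
        } while (!atomic_compare_exchange_weak(&r->state, &os.word, ns.word));
        start = os.pidx_head;   /* our reservation: [start, end) */
        end = ns.pidx_head;

        /* Step 2: fill the reserved slots; no other thread touches them. */
        for (i = start; n > 0; n--) {
                r->items[i] = *items++;
                if (++i == DEMO_SIZE)
                        i = 0;
        }

        /* Step 3: publish in reservation order -- spin until every earlier
         * producer has advanced pidx_tail up to our start, then CAS it. */
        for (;;) {
                os.word = atomic_load(&r->state);
                if (os.pidx_tail != start)
                        continue;
                ns.word = os.word;
                ns.pidx_tail = end;
                if (atomic_compare_exchange_weak(&r->state, &os.word,
                    ns.word))
                        break;
        }
        return (0);
}

int
main(void)
{
        struct demo_ring r;
        int a = 1, b = 2;
        void *two[] = { &a, &b };
        union demo_state s;

        atomic_store(&r.state, 0);
        demo_enqueue(&r, two, 2);
        s.word = atomic_load(&r.state);
        printf("pidx_tail=%u pidx_head=%u\n", (unsigned)s.pidx_tail,
            (unsigned)s.pidx_head);     /* 2 and 2 */
        return (0);
}

The real function additionally distinguishes the slower consumer paths (C_2, C_3, C_TAKEOVER) and, when the ring was STALLED but r->can_drain(r) is true, flips it back to busy with a release fcmpset (line 455) before taking the consumer lock.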
476 mp_ring_check_drainage(struct mp_ring *r, int budget) in mp_ring_check_drainage() argument
480 os.state = atomic_load_64(&r->state); in mp_ring_check_drainage()
481 if (os.flags == STALLED && r->can_drain(r)) { in mp_ring_check_drainage()
485 if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) { in mp_ring_check_drainage()
486 mtx_lock(r->cons_lock); in mp_ring_check_drainage()
487 drain_ring(r, budget); in mp_ring_check_drainage()
488 mtx_unlock(r->cons_lock); in mp_ring_check_drainage()
494 if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) { in mp_ring_check_drainage()
495 mtx_lock(r->cons_lock); in mp_ring_check_drainage()
496 drain_txpkts(r, ns, budget); in mp_ring_check_drainage()
497 mtx_unlock(r->cons_lock); in mp_ring_check_drainage()
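mp_ring_check_drainage is the recovery path driven from outside the enqueue: if the state says STALLED and r->can_drain(r) reports progress is possible again, one caller wins an acquire cmpset (a single attempt, not a loop) to take ownership and drains under the consumer lock; a second branch (the cmpset at line 494) hands the ring to drain_txpkts instead, evidently to flush pending coalesced work. A sketch of the single-attempt takeover, reusing the demo_state union and flag names from the sketches above; demo_try_takeover is a name invented here:

/* Exactly one caller wins the STALLED -> BUSY transition; the losers
 * see the cmpset fail and leave the ring to the winner. */
static int
demo_try_takeover(_Atomic uint64_t *state)
{
        union demo_state os, ns;

        os.word = atomic_load(state);
        if (os.flags != DEMO_STALLED)
                return (0);
        ns.word = os.word;
        ns.flags = DEMO_BUSY;
        /* One shot, like atomic_cmpset_acq_64 in the listing. */
        return (atomic_compare_exchange_strong(state, &os.word, ns.word));
}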
503 mp_ring_reset_stats(struct mp_ring *r) in mp_ring_reset_stats() argument
507 counter_u64_zero(r->dropped); in mp_ring_reset_stats()
508 for (i = 0; i < nitems(r->consumer); i++) in mp_ring_reset_stats()
509 counter_u64_zero(r->consumer[i]); in mp_ring_reset_stats()
510 counter_u64_zero(r->not_consumer); in mp_ring_reset_stats()
511 counter_u64_zero(r->abdications); in mp_ring_reset_stats()
512 counter_u64_zero(r->stalls); in mp_ring_reset_stats()
513 counter_u64_zero(r->consumed); in mp_ring_reset_stats()
514 counter_u64_zero(r->cons_idle); in mp_ring_reset_stats()
515 counter_u64_zero(r->cons_idle2); in mp_ring_reset_stats()
519 mp_ring_is_idle(struct mp_ring *r) in mp_ring_is_idle() argument
523 s.state = atomic_load_64(&r->state); in mp_ring_is_idle()
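Because the whole ring state fits in one 64-bit word, idleness is a single atomic load followed by field checks, with no lock needed. Reusing the demo_state union from the sketches above, the test presumably looks like this (a sketch; the exact predicate is not shown in the listing):

/* True only when the ring is empty (all indices equal) and no consumer
 * is running or stalled (flags back to IDLE). */
static int
demo_ring_is_idle(_Atomic uint64_t *state)
{
        union demo_state s;

        s.word = atomic_load(state);
        return (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
            s.flags == DEMO_IDLE);
}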
532 mp_ring_sysctls(struct mp_ring *r, struct sysctl_ctx_list *ctx, in mp_ring_sysctls() argument
542 __DEVOLATILE(uint64_t *, &r->state), 0, "ring state"); in mp_ring_sysctls()
544 &r->dropped, "# of items dropped"); in mp_ring_sysctls()
546 CTLFLAG_RD, &r->consumed, "# of items consumed"); in mp_ring_sysctls()
548 CTLFLAG_RD, &r->consumer[C_FAST], in mp_ring_sysctls()
551 CTLFLAG_RD, &r->consumer[C_2], in mp_ring_sysctls()
554 CTLFLAG_RD, &r->consumer[C_3], in mp_ring_sysctls()
557 CTLFLAG_RD, &r->consumer[C_TAKEOVER], in mp_ring_sysctls()
560 CTLFLAG_RD, &r->not_consumer, in mp_ring_sysctls()
563 CTLFLAG_RD, &r->abdications, "# of consumer abdications"); in mp_ring_sysctls()
565 CTLFLAG_RD, &r->stalls, "# of consumer stalls"); in mp_ring_sysctls()
567 CTLFLAG_RD, &r->cons_idle, in mp_ring_sysctls()
570 CTLFLAG_RD, &r->cons_idle2, in mp_ring_sysctls()