Lines Matching +full:idle +full:- +full:state

1 /*-
50 * consumes everything (IDLE or COALESCING) or gets STALLED. If it is running
56 * COALESCING is the same as IDLE except there are items being held in the hope
69 uint64_t state; member
73 IDLE = 0, /* tx is all caught up, nothing to do. */ enumerator
74 COALESCING, /* IDLE, but tx frames are being held for coalescing */
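
The matched lines above all touch the ring's single packed state word and its flag values. A minimal sketch of that layout, assuming four 16-bit fields as suggested by the pidx_head/pidx_tail/cidx/flags uses below; IDLE, COALESCING, and STALLED appear in the matches, while BUSY is an assumed name for the "running" state the truncated line-50 comment refers to:

    union ring_state {
            struct {
                    uint16_t pidx_head;     /* next producer reserves from here */
                    uint16_t pidx_tail;     /* items up to here are fully written */
                    uint16_t cidx;          /* consumer has drained up to here */
                    uint16_t flags;         /* IDLE, COALESCING, BUSY, STALLED */
            };
            uint64_t state;                 /* all of the above in one atomic 64-bit word */
    };

    enum {
            IDLE = 0,       /* tx is all caught up, nothing to do. */
            COALESCING,     /* IDLE, but tx frames are being held for coalescing */
            BUSY,           /* a consumer is running (assumed name) */
            STALLED,        /* consumer stopped due to lack of resources */
    };
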
91 uint16_t x = r->size - 1; in space_available()
96 return (s.cidx - s.pidx_head - 1); in space_available()
98 return (x - s.pidx_head + s.cidx); in space_available()
104 int x = r->size - idx; in increment_idx()
107 return (x > n ? idx + n : n - x); in increment_idx()
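
The two helpers matched at lines 91-107 do the ring's modular index arithmetic. A sketch consistent with those fragments, assuming the usual one-empty-slot convention so a full ring is distinguishable from an empty one (the type name mp_ring is inferred from the mp_ring_* functions below):

    /* Free slots between the producer head and the consumer index. */
    static inline uint16_t
    space_available(struct mp_ring *r, union ring_state s)
    {
            uint16_t x = r->size - 1;       /* capacity: one slot stays empty */

            if (s.cidx == s.pidx_head)
                    return (x);                             /* empty ring */
            else if (s.cidx > s.pidx_head)
                    return (s.cidx - s.pidx_head - 1);
            else
                    return (x - s.pidx_head + s.cidx);      /* head has wrapped */
    }

    /* Advance idx by n, wrapping past the end of the ring. */
    static inline uint16_t
    increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
    {
            int x = r->size - idx;          /* slots left before the wrap point */

            return (x > n ? idx + n : n - x);
    }
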
123 mtx_assert(r->cons_lock, MA_OWNED); in drain_ring()
125 os.state = atomic_load_acq_64(&r->state); in drain_ring()
138 n = r->drain(r, cidx, pidx, &coalescing); in drain_ring()
141 os.state = atomic_load_64(&r->state); in drain_ring()
143 ns.state = os.state; in drain_ring()
154 } while (atomic_fcmpset_64(&r->state, &os.state, in drain_ring()
155 ns.state) == 0); in drain_ring()
158 counter_u64_add(r->abdications, 1); in drain_ring()
160 counter_u64_add(r->stalls, 1); in drain_ring()
166 counter_u64_add(r->consumed, n); in drain_ring()
168 os.state = atomic_load_64(&r->state); in drain_ring()
173 ns.state = os.state; in drain_ring()
181 ns.flags = coalescing ? COALESCING : IDLE; in drain_ring()
191 } while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0); in drain_ring()
195 counter_u64_add(r->abdications, 1); in drain_ring()
199 if (ns.flags == IDLE || ns.flags == COALESCING) { in drain_ring()
202 counter_u64_add(r->cons_idle2, 1); in drain_ring()
204 counter_u64_add(r->cons_idle, 1); in drain_ring()
220 MPASS(ns.flags == IDLE || ns.flags == COALESCING || in drain_ring()
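
The drain_ring matches show the consumer's pattern: drain a batch through the driver callback, then publish the new cidx with a load/copy/modify/fcmpset loop, counting consumed items, stalls, and abdications along the way. A simplified single-pass sketch of that pattern under the state layout above (the stall, abdication, and coalescing hand-off paths visible in the fragments are elided):

    static void
    drain_ring_sketch(struct mp_ring *r)
    {
            union ring_state os, ns;
            int n, coalescing = 0;

            mtx_assert(r->cons_lock, MA_OWNED);
            os.state = atomic_load_acq_64(&r->state);
            while (os.cidx != os.pidx_tail) {
                    /* Hand the [cidx, pidx_tail) range to the driver. */
                    n = r->drain(r, os.cidx, os.pidx_tail, &coalescing);
                    if (n == 0)
                            break;  /* out of resources: real code goes STALLED */
                    do {
                            ns.state = os.state;
                            ns.cidx = increment_idx(r, os.cidx, n);
                            if (ns.cidx == ns.pidx_tail)    /* caught up */
                                    ns.flags = coalescing ? COALESCING : IDLE;
                    } while (atomic_fcmpset_acq_64(&r->state, &os.state,
                        ns.state) == 0);
                    counter_u64_add(r->consumed, n);
                    os.state = ns.state;    /* continue from the published state */
            }
    }
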
234 mtx_assert(r->cons_lock, MA_OWNED); in drain_txpkts()
238 r->drain(r, cidx, pidx, &coalescing); in drain_txpkts()
241 os.state = atomic_load_64(&r->state); in drain_txpkts()
243 ns.state = os.state; in drain_txpkts()
247 ns.flags = IDLE; in drain_txpkts()
250 } while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0); in drain_txpkts()
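
drain_txpkts is the narrower case: nothing new is pending, only frames already held for coalescing need flushing, after which the ring drops straight back to IDLE. A compressed sketch of the transition the fragments show (that the index range is empty, i.e. cidx == pidx, is an assumption):

    static void
    drain_txpkts_sketch(struct mp_ring *r, union ring_state os)
    {
            union ring_state ns;
            int coalescing = 0;

            mtx_assert(r->cons_lock, MA_OWNED);
            /* Flush held frames; the index range is empty by assumption. */
            r->drain(r, os.cidx, os.pidx_tail, &coalescing);
            do {
                    ns.state = os.state;
                    ns.flags = IDLE;
            } while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0);
    }
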
276 r->size = size; in mp_ring_alloc()
277 r->cookie = cookie; in mp_ring_alloc()
278 r->mt = mt; in mp_ring_alloc()
279 r->drain = drain; in mp_ring_alloc()
280 r->can_drain = can_drain; in mp_ring_alloc()
281 r->cons_lock = lck; in mp_ring_alloc()
282 if ((r->dropped = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
284 for (i = 0; i < nitems(r->consumer); i++) { in mp_ring_alloc()
285 if ((r->consumer[i] = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
288 if ((r->not_consumer = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
290 if ((r->abdications = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
292 if ((r->stalls = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
294 if ((r->consumed = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
296 if ((r->cons_idle = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
298 if ((r->cons_idle2 = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
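
Every counter in the constructor follows the same allocate-or-bail shape, and the per-counter NULL checks in mp_ring_free below are what make bailing out of a half-built ring safe. A hypothetical caller, with the prototype inferred from the assignments at file lines 276-282 (struct my_txq, my_tx_drain, and my_tx_can_drain are illustrative names, not from the listing):

    static int
    attach_txq_sketch(struct my_txq *txq, struct mtx *lck)
    {
            int rc;

            /* size, cookie, drain cb, can-drain cb, malloc type, lock, M_* flags */
            rc = mp_ring_alloc(&txq->ring, 1024, txq, my_tx_drain,
                my_tx_can_drain, M_DEVBUF, lck, M_WAITOK);
            if (rc != 0)
                    return (rc);    /* partial allocations freed by the failed path */
            return (0);
    }
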
316 if (r->dropped != NULL) in mp_ring_free()
317 counter_u64_free(r->dropped); in mp_ring_free()
318 for (i = 0; i < nitems(r->consumer); i++) { in mp_ring_free()
319 if (r->consumer[i] != NULL) in mp_ring_free()
320 counter_u64_free(r->consumer[i]); in mp_ring_free()
322 if (r->not_consumer != NULL) in mp_ring_free()
323 counter_u64_free(r->not_consumer); in mp_ring_free()
324 if (r->abdications != NULL) in mp_ring_free()
325 counter_u64_free(r->abdications); in mp_ring_free()
326 if (r->stalls != NULL) in mp_ring_free()
327 counter_u64_free(r->stalls); in mp_ring_free()
328 if (r->consumed != NULL) in mp_ring_free()
329 counter_u64_free(r->consumed); in mp_ring_free()
330 if (r->cons_idle != NULL) in mp_ring_free()
331 counter_u64_free(r->cons_idle); in mp_ring_free()
332 if (r->cons_idle2 != NULL) in mp_ring_free()
333 counter_u64_free(r->cons_idle2); in mp_ring_free()
335 free(r, r->mt); in mp_ring_free()
359 os.state = atomic_load_64(&r->state); in mp_ring_enqueue()
367 MPASS(os.flags != IDLE); in mp_ring_enqueue()
370 counter_u64_add(r->dropped, n); in mp_ring_enqueue()
377 os.state = atomic_load_64(&r->state); in mp_ring_enqueue()
382 cons = -1; in mp_ring_enqueue()
383 ns.state = os.state; in mp_ring_enqueue()
385 if (os.flags == IDLE || os.flags == COALESCING) { in mp_ring_enqueue()
398 if (atomic_fcmpset_64(&r->state, &os.state, ns.state)) in mp_ring_enqueue()
410 r->items[i] = *items++; in mp_ring_enqueue()
411 if (__predict_false(++i == r->size)) in mp_ring_enqueue()
415 counter_u64_add(r->consumer[C_FAST], 1); in mp_ring_enqueue()
416 mtx_lock(r->cons_lock); in mp_ring_enqueue()
418 mtx_unlock(r->cons_lock); in mp_ring_enqueue()
429 ns.state = atomic_load_64(&r->state); in mp_ring_enqueue()
435 r->items[i] = *items++; in mp_ring_enqueue()
436 if (__predict_false(++i == r->size)) in mp_ring_enqueue()
444 os.state = atomic_load_64(&r->state); in mp_ring_enqueue()
447 ns.state = os.state; in mp_ring_enqueue()
449 if (os.flags == IDLE || os.flags == COALESCING || in mp_ring_enqueue()
450 (os.flags == STALLED && r->can_drain(r))) { in mp_ring_enqueue()
451 MPASS(cons == -1); in mp_ring_enqueue()
455 } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0); in mp_ring_enqueue()
458 if (cons == -1) { in mp_ring_enqueue()
462 counter_u64_add(r->not_consumer, 1); in mp_ring_enqueue()
466 MPASS(cons > C_FAST && cons < nitems(r->consumer)); in mp_ring_enqueue()
467 counter_u64_add(r->consumer[cons], 1); in mp_ring_enqueue()
468 mtx_lock(r->cons_lock); in mp_ring_enqueue()
470 mtx_unlock(r->cons_lock); in mp_ring_enqueue()
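
Taken together, the mp_ring_enqueue matches describe a three-phase lock-free producer: reserve slots by advancing pidx_head with a compare-and-swap, copy items into the reserved range, then wait your turn and publish by moving pidx_tail with release semantics. A reduced sketch under the state layout above (the drop accounting, coalescing, abdication, and become-consumer decisions in the fragments are left out):

    static int
    enqueue_sketch(struct mp_ring *r, void **items, int n)
    {
            union ring_state os, ns;
            uint16_t pidx_start, pidx_stop;
            int i;

            /* Phase 1: reserve n slots by advancing pidx_head. */
            os.state = atomic_load_64(&r->state);
            for (;;) {
                    if (space_available(r, os) < n)
                            return (ENOBUFS);       /* real code counts r->dropped */
                    ns.state = os.state;
                    ns.pidx_head = increment_idx(r, os.pidx_head, n);
                    if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
                            break;
            }
            pidx_start = os.pidx_head;
            pidx_stop = ns.pidx_head;

            /* Phase 2: fill the reserved slots, wrapping at r->size. */
            for (i = pidx_start; i != pidx_stop;) {
                    r->items[i] = *items++;
                    if (__predict_false(++i == r->size))
                            i = 0;
            }

            /* Phase 3: wait for earlier producers, then publish our range. */
            os.state = atomic_load_64(&r->state);
            while (os.pidx_tail != pidx_start) {
                    cpu_spinwait();
                    os.state = atomic_load_64(&r->state);
            }
            do {
                    ns.state = os.state;
                    ns.pidx_tail = pidx_stop;
            } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
            return (0);
    }
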
480 os.state = atomic_load_64(&r->state); in mp_ring_check_drainage()
481 if (os.flags == STALLED && r->can_drain(r)) { in mp_ring_check_drainage()
483 ns.state = os.state; in mp_ring_check_drainage()
485 if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) { in mp_ring_check_drainage()
486 mtx_lock(r->cons_lock); in mp_ring_check_drainage()
488 mtx_unlock(r->cons_lock); in mp_ring_check_drainage()
492 ns.state = os.state; in mp_ring_check_drainage()
494 if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) { in mp_ring_check_drainage()
495 mtx_lock(r->cons_lock); in mp_ring_check_drainage()
497 mtx_unlock(r->cons_lock); in mp_ring_check_drainage()
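
mp_ring_check_drainage is the recovery path for work left behind without a consumer: a STALLED ring whose resources have come back, or (second branch above) frames parked for coalescing. In either case consumership is claimed with a single acquire compare-and-swap before the consumer lock is taken. A sketch of the first branch, assuming the BUSY flag from the state sketch above:

    static void
    check_drainage_sketch(struct mp_ring *r)
    {
            union ring_state os, ns;

            os.state = atomic_load_64(&r->state);
            if (os.flags != STALLED || !r->can_drain(r))
                    return;

            ns.state = os.state;
            ns.flags = BUSY;        /* claim consumership atomically */
            if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) {
                    mtx_lock(r->cons_lock);
                    /* ... run the drain loop (drain_ring in the listing) ... */
                    mtx_unlock(r->cons_lock);
            }
    }
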
507 counter_u64_zero(r->dropped); in mp_ring_reset_stats()
508 for (i = 0; i < nitems(r->consumer); i++) in mp_ring_reset_stats()
509 counter_u64_zero(r->consumer[i]); in mp_ring_reset_stats()
510 counter_u64_zero(r->not_consumer); in mp_ring_reset_stats()
511 counter_u64_zero(r->abdications); in mp_ring_reset_stats()
512 counter_u64_zero(r->stalls); in mp_ring_reset_stats()
513 counter_u64_zero(r->consumed); in mp_ring_reset_stats()
514 counter_u64_zero(r->cons_idle); in mp_ring_reset_stats()
515 counter_u64_zero(r->cons_idle2); in mp_ring_reset_stats()
523 s.state = atomic_load_64(&r->state); in mp_ring_is_idle()
525 s.flags == IDLE) in mp_ring_is_idle()
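
The idle test reads the whole state word once; a ring is idle only when nothing is reserved, nothing is unpublished, and nothing is undrained. A sketch of the predicate implied by the fragment (the three-way index comparison is an assumption consistent with the layout above):

    static inline bool
    is_idle_sketch(struct mp_ring *r)
    {
            union ring_state s;

            s.state = atomic_load_64(&r->state);
            return (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
                s.flags == IDLE);
    }
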
541 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "state", CTLFLAG_RD, in mp_ring_sysctls()
542 __DEVOLATILE(uint64_t *, &r->state), 0, "ring state"); in mp_ring_sysctls()
544 &r->dropped, "# of items dropped"); in mp_ring_sysctls()
546 CTLFLAG_RD, &r->consumed, "# of items consumed"); in mp_ring_sysctls()
548 CTLFLAG_RD, &r->consumer[C_FAST], in mp_ring_sysctls()
551 CTLFLAG_RD, &r->consumer[C_2], in mp_ring_sysctls()
554 CTLFLAG_RD, &r->consumer[C_3], in mp_ring_sysctls()
557 CTLFLAG_RD, &r->consumer[C_TAKEOVER], in mp_ring_sysctls()
560 CTLFLAG_RD, &r->not_consumer, in mp_ring_sysctls()
563 CTLFLAG_RD, &r->abdications, "# of consumer abdications"); in mp_ring_sysctls()
565 CTLFLAG_RD, &r->stalls, "# of consumer stalls"); in mp_ring_sysctls()
567 CTLFLAG_RD, &r->cons_idle, in mp_ring_sysctls()
570 CTLFLAG_RD, &r->cons_idle2, in mp_ring_sysctls()