Lines Matching full:flags
49 * isn't one already. The consumer runs with the flags set to BUSY and
51 * over its budget it sets flags to TOO_BUSY. A producer that observes a
52 * TOO_BUSY consumer will become the new consumer by setting flags to
53 * TAKING_OVER. The original consumer stops and sets the flags back to BUSY for
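The hand-off described in this comment is the core of the ring's state machine. Below is a minimal userland sketch of it, assuming illustrative state values and field names (the real definitions are private to mp_ring.c, which packs the indices and flags into one 64-bit state word and updates it with the kernel's 64-bit compare-and-set rather than C11 atomics): a producer relieves a TOO_BUSY consumer by CASing the flags to TAKING_OVER, and the old consumer steps aside by leaving the ring BUSY for its successor.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative state values; the real enum is private to mp_ring.c. */
enum { IDLE, COALESCING, BUSY, TOO_BUSY, TAKING_OVER, STALLED };

/*
 * Model of the ring state: indices and flags share one 64-bit word so the
 * whole state can be read and compare-and-swapped atomically.  Field names
 * are illustrative.
 */
union ring_state {
	struct {
		uint16_t pidx_head;	/* producer reservation index */
		uint16_t pidx_tail;	/* producer completion index */
		uint16_t cidx;		/* consumer index */
		uint16_t flags;		/* IDLE, BUSY, TOO_BUSY, ... */
	};
	uint64_t state;
};

static _Atomic uint64_t ring_state;

/* Producer side: relieve a consumer that has reported TOO_BUSY. */
static bool
try_take_over(void)
{
	union ring_state os, ns;

	os.state = atomic_load(&ring_state);
	if (os.flags != TOO_BUSY)
		return (false);
	ns = os;
	ns.flags = TAKING_OVER;
	/* At most one producer wins this CAS and becomes the new consumer. */
	return (atomic_compare_exchange_strong(&ring_state, &os.state, ns.state));
}

/* Old consumer side: notice the take-over and leave the ring BUSY. */
static bool
consumer_should_stop(union ring_state os)
{
	union ring_state ns;

	if (os.flags != TAKING_OVER)
		return (false);
	ns = os;
	ns.flags = BUSY;	/* the new consumer carries on from here */
	atomic_compare_exchange_strong(&ring_state, &os.state, ns.state);
	return (true);
}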
67 uint16_t flags; member
126 MPASS(os.flags == BUSY); in drain_ring()
146 MPASS(os.flags == BUSY || in drain_ring()
147 os.flags == TOO_BUSY || in drain_ring()
148 os.flags == TAKING_OVER); in drain_ring()
150 if (os.flags == TAKING_OVER) in drain_ring()
151 ns.flags = BUSY; in drain_ring()
153 ns.flags = STALLED; in drain_ring()
157 if (os.flags == TAKING_OVER) in drain_ring()
159 else if (ns.flags == STALLED) in drain_ring()
170 MPASS(os.flags == BUSY || os.flags == TOO_BUSY || in drain_ring()
171 os.flags == TAKING_OVER); in drain_ring()
175 if (__predict_false(os.flags == TAKING_OVER)) { in drain_ring()
177 ns.flags = BUSY; in drain_ring()
181 ns.flags = coalescing ? COALESCING : IDLE; in drain_ring()
185 ns.flags = TOO_BUSY; in drain_ring()
188 MPASS(os.flags == BUSY); in drain_ring()
193 if (__predict_false(os.flags == TAKING_OVER)) { in drain_ring()
194 MPASS(ns.flags == BUSY); in drain_ring()
199 if (ns.flags == IDLE || ns.flags == COALESCING) { in drain_ring()
217 if (os.flags == TAKING_OVER) in drain_ring()
218 MPASS(ns.flags == BUSY); in drain_ring()
220 MPASS(ns.flags == IDLE || ns.flags == COALESCING || in drain_ring()
221 ns.flags == STALLED); in drain_ring()
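Read together, the drain_ring() matches above trace the consumer's exit paths: a consumer that observes TAKING_OVER hands the ring off as BUSY, one that runs out of resources parks it as STALLED, one that empties the ring leaves it IDLE (or COALESCING), and one that exceeds its budget marks it TOO_BUSY so that a later producer can take over. Continuing the illustrative sketch above, a hypothetical condensation of that decision (the real function recomputes the whole state word and retries the CAS when it loses a race):

/* Hypothetical condensation of the consumer's exit logic in drain_ring(). */
static uint16_t
consumer_next_flags(uint16_t old_flags, bool ring_empty, bool out_of_resources,
    bool over_budget, bool coalescing)
{
	if (old_flags == TAKING_OVER)
		return (BUSY);		/* hand off to the new consumer */
	if (out_of_resources)
		return (STALLED);	/* restarted by a producer or check_drainage */
	if (ring_empty)
		return (coalescing ? COALESCING : IDLE);
	if (over_budget)
		return (TOO_BUSY);	/* invite another producer to take over */
	return (BUSY);			/* keep draining */
}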
235 MPASS(os.flags == BUSY); in drain_txpkts()
244 MPASS(os.flags == BUSY); in drain_txpkts()
247 ns.flags = IDLE; in drain_txpkts()
249 ns.flags = BUSY; in drain_txpkts()
253 if (ns.flags == BUSY) in drain_txpkts()
260 int flags) in mp_ring_alloc() argument
270 flags &= M_NOWAIT | M_WAITOK; in mp_ring_alloc()
271 MPASS(flags != 0); in mp_ring_alloc()
273 r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO); in mp_ring_alloc()
282 if ((r->dropped = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
285 if ((r->consumer[i] = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
288 if ((r->not_consumer = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
290 if ((r->abdications = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
292 if ((r->stalls = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
294 if ((r->consumed = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
296 if ((r->cons_idle = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
298 if ((r->cons_idle2 = counter_u64_alloc(flags)) == NULL) in mp_ring_alloc()
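The mp_ring_alloc() matches show the allocation pattern: the caller's wait flag is masked down to M_NOWAIT | M_WAITOK and asserted non-zero, the ring itself is sized with __offsetof(struct mp_ring, items[size]) so the items[] slots are allocated inline with the header, and the same flag is handed to each counter_u64_alloc(), any of which may fail under M_NOWAIT. A small userland model of the sizing idiom, with an illustrative stand-in struct and the standard offsetof in place of the kernel's __offsetof:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for struct mp_ring with a trailing flexible array. */
struct ring {
	uint16_t	size;
	uint16_t	flags;
	void		*items[];	/* 'size' slots allocated inline */
};

static struct ring *
ring_alloc(int size)
{
	/* Same size that __offsetof(struct mp_ring, items[size]) computes:
	   the header plus 'size' pointer slots. */
	size_t bytes = offsetof(struct ring, items) + (size_t)size * sizeof(void *);
	struct ring *r = calloc(1, bytes);	/* calloc stands in for M_ZERO */

	if (r != NULL)
		r->size = (uint16_t)size;
	return (r);
}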
367 MPASS(os.flags != IDLE); in mp_ring_enqueue()
368 MPASS(os.flags != COALESCING); in mp_ring_enqueue()
373 if (os.flags == STALLED) in mp_ring_enqueue()
385 if (os.flags == IDLE || os.flags == COALESCING) { in mp_ring_enqueue()
392 ns.flags = BUSY; in mp_ring_enqueue()
393 } else if (os.flags == TOO_BUSY) { in mp_ring_enqueue()
395 ns.flags = TAKING_OVER; in mp_ring_enqueue()
449 if (os.flags == IDLE || os.flags == COALESCING || in mp_ring_enqueue()
450 (os.flags == STALLED && r->can_drain(r))) { in mp_ring_enqueue()
453 ns.flags = BUSY; in mp_ring_enqueue()
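On the producer side, mp_ring_enqueue() decides what the enqueueing thread has to do from the flags it observed: an IDLE or COALESCING ring means it becomes the consumer, a TOO_BUSY consumer is relieved via TAKING_OVER, and a STALLED ring is only restarted when r->can_drain(r) reports that resources are available again; otherwise the items are simply left for the active consumer. Continuing the illustrative sketch, a hypothetical condensation of that decision:

/* Hypothetical condensation of the producer's decision in mp_ring_enqueue(). */
enum producer_action { DRAIN_RING, TAKE_OVER, JUST_ENQUEUE };

static enum producer_action
producer_action(uint16_t old_flags, bool can_drain)
{
	if (old_flags == IDLE || old_flags == COALESCING)
		return (DRAIN_RING);	/* become the consumer: flags -> BUSY */
	if (old_flags == TOO_BUSY)
		return (TAKE_OVER);	/* relieve the consumer: flags -> TAKING_OVER */
	if (old_flags == STALLED && can_drain)
		return (DRAIN_RING);	/* resources are back: flags -> BUSY */
	return (JUST_ENQUEUE);		/* an active consumer will pick the items up */
}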
481 if (os.flags == STALLED && r->can_drain(r)) { in mp_ring_check_drainage()
484 ns.flags = BUSY; in mp_ring_check_drainage()
490 } else if (os.flags == COALESCING) { in mp_ring_check_drainage()
493 ns.flags = BUSY; in mp_ring_check_drainage()
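mp_ring_check_drainage() looks like the slow-path recovery check: a STALLED ring is restarted once r->can_drain(r) says resources are back, and a COALESCING ring is forced BUSY, presumably so that deferred (coalesced) work is eventually flushed. Continuing the sketch, a hypothetical condensation of that check:

/* Hypothetical condensation of the restart check in mp_ring_check_drainage(). */
static bool
ring_needs_restart(uint16_t old_flags, bool can_drain)
{
	if (old_flags == STALLED && can_drain)
		return (true);	/* resources available again: flags -> BUSY */
	if (old_flags == COALESCING)
		return (true);	/* flush deferred work: flags -> BUSY */
	return (false);
}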
525 s.flags == IDLE) in mp_ring_is_idle()