Lines Matching defs:rings

184 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
189 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
583 /* IOPOLL rings only need to wake up if it's also SQPOLL */
647 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
654 if (ctx->rings)
707 struct io_rings *r = ctx->rings;
720 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
761 struct io_rings *rings = ctx->rings;
786 ctx->cqe_cached = &rings->cqes[off];
1072 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1245 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1270 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1316 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1351 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
2319 struct io_rings *rings = ctx->rings;
2326 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2346 WRITE_ONCE(ctx->rings->sq_dropped,
2347 READ_ONCE(ctx->rings->sq_dropped) + 1);
2484 if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
2597 struct io_rings *rings = ctx->rings;
2619 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2620 iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
2656 READ_ONCE(ctx->rings->cq.tail);
2715 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2722 ctx->rings = NULL;
2729 struct io_rings *rings;
2732 off = struct_size(rings, cqes, cq_entries);
3051 * if we're exiting a ton of rings at the same time. It just adds
3146 if (!ctx->rings)
3153 * Cancels requests of all rings, not only @ctx, but
3549 struct io_rings *rings;
3571 ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
3574 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3575 rings->sq_ring_mask = p->sq_entries - 1;
3576 rings->cq_ring_mask = p->cq_entries - 1;
3577 rings->sq_ring_entries = p->sq_entries;
3578 rings->cq_ring_entries = p->cq_entries;
3631 /* There is no way to mmap rings without a real fd */
3806 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
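
Taken together, these matches trace io_uring's shared-ring bookkeeping: the CQ fill level is an unsigned subtraction of free-running counters (lines 184 and 189), consumed SQ entries are published with smp_store_release() (line 2326), the shared region is sized with struct_size() over the cqes[] flexible array (line 2732), and indices are reduced with the power-of-two masks set up at ring creation (lines 3575-3578). Below is a minimal single-threaded sketch of that counter-and-mask arithmetic; the names ring_used and ring_slot are hypothetical stand-ins for the kernel's helpers, and the real code additionally wraps every cross-process access in READ_ONCE()/smp_store_release().

#include <stdint.h>
#include <stdio.h>

/*
 * Single-threaded model of an io_uring-style ring. Both counters are
 * free-running 32-bit values: they are never reduced modulo the ring
 * size themselves; only array accesses apply the mask.
 */
struct ring {
	uint32_t head;          /* consumer position, increments forever */
	uint32_t tail;          /* producer position, increments forever */
	uint32_t ring_mask;     /* entries - 1; entries is a power of two */
	uint32_t ring_entries;
};

/*
 * Filled slots, mirroring "cq.tail - cq.head" at line 189. Unsigned
 * subtraction stays correct even after tail wraps past UINT32_MAX.
 */
static uint32_t ring_used(const struct ring *r)
{
	return r->tail - r->head;
}

/* Map a free-running counter to an array index, as sq_ring_mask and
 * cq_ring_mask do in the kernel (lines 3575-3576). */
static uint32_t ring_slot(const struct ring *r, uint32_t pos)
{
	return pos & r->ring_mask;
}

int main(void)
{
	/* tail has wrapped past zero; head has not */
	struct ring r = {
		.head = 0xfffffffeu,   /* UINT32_MAX - 1 */
		.tail = 0x00000001u,   /* three entries later, wrapped */
		.ring_mask = 8 - 1,
		.ring_entries = 8,
	};

	printf("used=%u head slot=%u tail slot=%u\n",
	       ring_used(&r), ring_slot(&r, r.head), ring_slot(&r, r.tail));
	/* prints: used=3 head slot=6 tail slot=1 */
	return 0;
}

Because head and tail only ever increase, the subtraction is immune to wraparound, and "entries - 1" works as a mask only because the entry counts are rounded to powers of two; this is why lines 3575-3578 store both the mask and the entry count in the shared region.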