Lines Matching refs:rings

192 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events()
197 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events_user()
635 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in __io_cqring_overflow_flush()
642 if (ctx->rings) in io_cqring_overflow_kill()
695 struct io_rings *r = ctx->rings; in io_cqring_add_overflow()
708 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in io_cqring_add_overflow()
751 struct io_uring_cqe *cqe = &ctx->rings->cqes[off]; in io_fill_nop_cqe()
769 struct io_rings *rings = ctx->rings; in io_cqe_cache_refill() local
804 ctx->cqe_cached = &rings->cqes[off]; in io_cqe_cache_refill()
1091 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in ctx_flush_and_put()
1260 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in io_req_local_work_add()
1285 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in io_req_normal_work_add()
1331 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in io_run_local_work_continue()
1366 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in __io_run_local_work()
2355 struct io_rings *rings = ctx->rings; in io_commit_sqring() local
2362 smp_store_release(&rings->sq.head, ctx->cached_sq_head); in io_commit_sqring()
2382 WRITE_ONCE(ctx->rings->sq_dropped, in io_get_sqe()
2383 READ_ONCE(ctx->rings->sq_dropped) + 1); in io_get_sqe()
2521 if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail)) in io_cqring_min_timer_wakeup()
2637 struct io_rings *rings = ctx->rings; in io_cqring_wait() local
2659 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; in io_cqring_wait()
2660 iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail); in io_cqring_wait()
2696 READ_ONCE(ctx->rings->cq.tail); in io_cqring_wait()
2755 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; in io_cqring_wait()
2762 ctx->rings = NULL; in io_rings_free()
2769 struct io_rings *rings; in rings_size() local
2792 off = struct_size(rings, cqes, cq_entries); in rings_size()
3371 struct io_rings *rings; in io_allocate_scq_urings() local
3387 ctx->rings = rings = io_region_get_ptr(&ctx->ring_region); in io_allocate_scq_urings()
3389 ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset); in io_allocate_scq_urings()
3404 memset(rings, 0, sizeof(*rings)); in io_allocate_scq_urings()
3405 WRITE_ONCE(rings->sq_ring_mask, ctx->sq_entries - 1); in io_allocate_scq_urings()
3406 WRITE_ONCE(rings->cq_ring_mask, ctx->cq_entries - 1); in io_allocate_scq_urings()
3407 WRITE_ONCE(rings->sq_ring_entries, ctx->sq_entries); in io_allocate_scq_urings()
3408 WRITE_ONCE(rings->cq_ring_entries, ctx->cq_entries); in io_allocate_scq_urings()
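
The matches at lines 192, 197, 2659-2660, 2696 and 2755 all lean on the same convention: cq.head and cq.tail are free-running 32-bit counters in the shared ring, and the number of pending completions is simply the unsigned difference tail - head (line 2659's iowq.cq_tail is just head + min_events, the tail value at which enough completions will have arrived). The sketch below is a minimal userspace model of that arithmetic, not kernel code: the type and function names are made up, and C11 relaxed atomic loads stand in for READ_ONCE().

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the shared CQ ring indices (kernel: ctx->rings->cq). */
struct cq_ring {
	_Atomic uint32_t head;	/* consumer index, advanced by userspace */
	_Atomic uint32_t tail;	/* producer index, advanced by the kernel */
};

/*
 * Pending completions, mirroring the subtraction in __io_cqring_events_user()
 * at line 197: both counters only ever grow, so the unsigned difference stays
 * correct even after either counter wraps around 2^32.
 */
static uint32_t cq_events(struct cq_ring *cq)
{
	return atomic_load_explicit(&cq->tail, memory_order_relaxed) -
	       atomic_load_explicit(&cq->head, memory_order_relaxed);
}

int main(void)
{
	struct cq_ring cq = { .head = UINT32_MAX - 1, .tail = 2 };

	/* Four completions are pending even though tail has wrapped past head. */
	assert(cq_events(&cq) == 4);
	printf("pending CQEs: %u\n", (unsigned)cq_events(&cq));
	return 0;
}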
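
Lines 635, 708, 1091, 1260, 1285, 1331 and 1366 set and clear bits in the shared rings->sq_flags word with atomic_or()/atomic_andnot(), so userspace polling IORING_SQ_CQ_OVERFLOW or IORING_SQ_TASKRUN sees the change without any lock being taken. C11 has no andnot primitive, so the rough equivalent below clears bits by AND-ing with the complemented mask; the flag names and values here are illustrative stand-ins, not the uapi definitions.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the IORING_SQ_* bits polled by userspace. */
#define SQ_CQ_OVERFLOW	(1u << 1)
#define SQ_TASKRUN	(1u << 2)

/* Like atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags) at line 1260. */
static void sq_flags_set(_Atomic uint32_t *flags, uint32_t bits)
{
	atomic_fetch_or_explicit(flags, bits, memory_order_relaxed);
}

/* Like atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags) at line 1366;
 * C11 has no fetch_andnot, so the bits are cleared via the complement. */
static void sq_flags_clear(_Atomic uint32_t *flags, uint32_t bits)
{
	atomic_fetch_and_explicit(flags, ~bits, memory_order_relaxed);
}

int main(void)
{
	_Atomic uint32_t sq_flags = 0;

	sq_flags_set(&sq_flags, SQ_TASKRUN | SQ_CQ_OVERFLOW);
	sq_flags_clear(&sq_flags, SQ_TASKRUN);

	/* Prints 0x2: only the overflow bit remains set. */
	printf("sq_flags = %#x\n", (unsigned)atomic_load(&sq_flags));
	return 0;
}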
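
io_commit_sqring() at lines 2355-2362 publishes the kernel's privately cached SQ head back to the shared ring with smp_store_release(), which is what tells userspace it may reuse those SQE slots, and io_get_sqe() at lines 2382-2383 bumps sq_dropped with a READ_ONCE()/WRITE_ONCE() pair when it reads an invalid SQE index. The following is a loose userspace analogue under those assumptions, with invented type names: a C11 release store plays the role of smp_store_release(), and relaxed loads/stores that of READ_ONCE()/WRITE_ONCE().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the shared SQ ring fields touched at lines 2355-2383. */
struct sq_ring {
	_Atomic uint32_t head;		/* consumer index published to userspace */
	_Atomic uint32_t dropped;	/* SQEs skipped because of a bad index    */
};

struct submit_state {
	struct sq_ring *sq;
	uint32_t cached_head;	/* private copy, bumped once per consumed SQE */
};

/*
 * Analogue of io_commit_sqring(): the release store orders all prior reads of
 * the SQE memory before the head update, so userspace that observes the new
 * head may safely overwrite those slots.
 */
static void commit_sqring(struct submit_state *s)
{
	atomic_store_explicit(&s->sq->head, s->cached_head,
			      memory_order_release);
}

/* Analogue of the sq_dropped bump in io_get_sqe(); only this side ever writes
 * the counter, so a relaxed read followed by a relaxed write is enough. */
static void account_dropped_sqe(struct sq_ring *sq)
{
	uint32_t d = atomic_load_explicit(&sq->dropped, memory_order_relaxed);

	atomic_store_explicit(&sq->dropped, d + 1, memory_order_relaxed);
}

int main(void)
{
	struct sq_ring ring = { .head = 0, .dropped = 0 };
	struct submit_state s = { .sq = &ring, .cached_head = 3 };

	account_dropped_sqe(&ring);	/* one bogus SQE index encountered  */
	commit_sqring(&s);		/* publish the three consumed slots */

	printf("head=%u dropped=%u\n",
	       (unsigned)atomic_load(&ring.head),
	       (unsigned)atomic_load(&ring.dropped));
	return 0;
}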
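
The setup path at lines 2769-2792 and 3371-3408 sizes the shared region so the CQE array sits as a flexible array member at its end (struct_size(rings, cqes, cq_entries)) and then seeds the ring masks as entries - 1, which works because both ring sizes are powers of two. The sketch below models only that sizing and the mask-based indexing; the field layout is illustrative and deliberately much simpler than the real struct io_rings.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative CQE and ring layout; the flexible cqes[] array at the end is
 * what makes struct_size(rings, cqes, cq_entries) the size of the region. */
struct cqe {
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
};

struct rings {
	uint32_t sq_head, sq_tail;
	uint32_t cq_head, cq_tail;
	uint32_t sq_ring_mask, cq_ring_mask;
	uint32_t sq_ring_entries, cq_ring_entries;
	struct cqe cqes[];		/* like rings->cqes at lines 751/804 */
};

int main(void)
{
	uint32_t sq_entries = 8, cq_entries = 16;	/* must be powers of two */

	/* Equivalent of struct_size(rings, cqes, cq_entries) at line 2792
	 * (the kernel macro additionally checks for multiplication overflow). */
	size_t region = sizeof(struct rings) +
			(size_t)cq_entries * sizeof(struct cqe);

	/* Mask setup as at lines 3405-3408: with power-of-two ring sizes,
	 * "index & mask" is the cheap form of "index % entries". */
	uint32_t sq_mask = sq_entries - 1;
	uint32_t cq_mask = cq_entries - 1;
	uint32_t slot = (cq_entries + 5) & cq_mask;	/* free-running tail 21 -> slot 5 */

	printf("region=%zu bytes sq_mask=%#x cq_mask=%#x slot=%u\n",
	       region, (unsigned)sq_mask, (unsigned)cq_mask, (unsigned)slot);
	return 0;
}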