Searched refs:cq_entries (Results 1 – 7 of 7) sorted by relevance
/linux/io_uring/
register.c
    426   if (p.sq_entries == ctx->sq_entries && p.cq_entries == ctx->cq_entries) {  in io_register_resize_rings()
    432   size = rings_size(p.flags, p.sq_entries, p.cq_entries,  in io_register_resize_rings()
    459   WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);  in io_register_resize_rings()
    461   WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);  in io_register_resize_rings()
    534   if (tail - old_head > p.cq_entries) {  in io_register_resize_rings()
    544   unsigned src_head = i & (ctx->cq_entries - 1);  in io_register_resize_rings()
    545   unsigned dst_head = i & (p.cq_entries - 1);  in io_register_resize_rings()
    564   ctx->cq_entries = p.cq_entries;  in io_register_resize_rings()
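
The resize hits above show the core invariant: cq_entries is always a power of two, so the published mask is cq_entries - 1 (line 459), and a free-running index is folded into either the old or the new ring with a single AND (lines 544/545). A minimal sketch of that copy step, assuming power-of-two sizes; the struct and function names here are illustrative, not the kernel's:

    /* Illustrative stand-in for struct io_uring_cqe. */
    struct cqe { unsigned long long user_data; int res; unsigned flags; };

    /* Copy the pending CQEs in [head, tail) from a ring with old_entries
     * slots into one with new_entries slots. Both counts are powers of
     * two, so "i & (entries - 1)" is "i % entries" without a divide. */
    static void copy_cqes(struct cqe *dst, unsigned new_entries,
                          const struct cqe *src, unsigned old_entries,
                          unsigned head, unsigned tail)
    {
        for (unsigned i = head; i != tail; i++)
            dst[i & (new_entries - 1)] = src[i & (old_entries - 1)];
    }

Line 534's check (tail - old_head > p.cq_entries) is the guard that the new ring can still hold everything queued in the old one.
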
io_uring.c
    302   hash_bits = ilog2(p->cq_entries) - 5;  in io_ring_ctx_alloc()
    625   if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)  in __io_cqring_overflow_flush()
    775   unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);  in io_cqe_cache_refill()
    787   queued = min(__io_cqring_events(ctx), ctx->cq_entries);  in io_cqe_cache_refill()
    788   free = ctx->cq_entries - queued;  in io_cqe_cache_refill()
    790   len = min(free, ctx->cq_entries - off);  in io_cqe_cache_refill()
    2645  unsigned int cq_entries, size_t *sq_offset)  in rings_size() argument
    2650  off = struct_size(rings, cqes, cq_entries);  in rings_size()
    3405  ctx->cq_entries);  in SYSCALL_DEFINE6()
    3415  ctx->cq_entries);  in SYSCALL_DEFINE6()
    [all …]
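
The io_cqe_cache_refill() hits (lines 775–790) compute how many CQE slots can be filled contiguously from the cached tail: the count is limited both by the free space in the ring and by the distance to the wrap point. A rough restatement of that arithmetic under the same power-of-two assumption (names are illustrative; the kernel derives the queued count via __io_cqring_events()):

    /* How many CQEs fit contiguously at the tail, without wrapping? */
    static unsigned contiguous_free_cqes(unsigned cq_entries,
                                         unsigned tail, unsigned head)
    {
        unsigned off    = tail & (cq_entries - 1); /* tail's slot index  */
        unsigned queued = tail - head;             /* unconsumed entries */
        if (queued > cq_entries)                   /* mirror the min()   */
            queued = cq_entries;                   /* clamp on line 787  */
        unsigned free   = cq_entries - queued;     /* total free slots   */
        unsigned to_end = cq_entries - off;        /* slots before wrap  */
        return free < to_end ? free : to_end;      /* min(), line 790    */
    }

Line 302's hash_bits = ilog2(p->cq_entries) - 5 leans on the same power-of-two guarantee, sizing an internal hash table in proportion to the ring.
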
io_uring.h
    74    unsigned int cq_entries, size_t *sq_offset);
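
Line 74 is part of the rings_size() prototype whose body appears at io_uring.c:2650: the shared ring area is a header struct followed by a flexible array of cq_entries CQEs, and struct_size() is the overflow-checked way to size it. A plain-C sketch of the same arithmetic, with illustrative types (the kernel helper additionally saturates on overflow):

    #include <stddef.h>

    struct cqe { unsigned long long user_data; int res; unsigned flags; };

    /* Header followed by a flexible array of CQEs. */
    struct rings_hdr {
        unsigned head, tail, ring_mask, ring_entries;
        struct cqe cqes[]; /* flexible array member */
    };

    /* What struct_size(rings, cqes, cq_entries) boils down to. */
    static size_t rings_bytes(unsigned cq_entries)
    {
        return sizeof(struct rings_hdr) +
               (size_t)cq_entries * sizeof(struct cqe);
    }
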
/linux/tools/include/uapi/linux/ |
io_uring.h
    487   __u32 cq_entries;  member
/linux/tools/include/io_uring/ |
mini_liburing.h
    98    cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);  in io_uring_mmap()
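
This hit is the userspace side of the contract: after io_uring_setup() fills in struct io_uring_params, the CQ mapping must cover the CQE array offset plus cq_entries fixed-size CQEs, mapped at the IORING_OFF_CQ_RING offset. A minimal sketch in the spirit of mini_liburing's io_uring_mmap(); error handling is trimmed, map_cq_ring is an illustrative helper rather than anything from the source, and kernels advertising IORING_FEAT_SINGLE_MMAP let the SQ and CQ share one mapping instead:

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Map the completion ring of a freshly set-up io_uring instance. */
    static void *map_cq_ring(int ring_fd, const struct io_uring_params *p,
                             size_t *ring_sz)
    {
        *ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
        return mmap(NULL, *ring_sz, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
    }

    int main(void)
    {
        struct io_uring_params p;
        memset(&p, 0, sizeof(p));
        int fd = (int)syscall(__NR_io_uring_setup, 8, &p); /* 8 SQ entries */
        if (fd < 0)
            return 1;
        size_t sz;
        void *cq = map_cq_ring(fd, &p, &sz);
        return cq == MAP_FAILED;
    }
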
/linux/include/linux/ |
io_uring_types.h
    332   unsigned cq_entries;  member
/linux/include/uapi/linux/ |
io_uring.h
    550   __u32 cq_entries;  member
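
In both copies of the UAPI header, cq_entries in struct io_uring_params is an in/out field: the kernel only reads it when IORING_SETUP_CQSIZE is set, and on return it holds the actual (power-of-two-rounded) CQ ring size. A small example of requesting an oversized CQ ring with raw syscalls rather than liburing:

    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring_params p;
        memset(&p, 0, sizeof(p));
        /* Without IORING_SETUP_CQSIZE the kernel picks the default
         * (twice the SQ size); with it, cq_entries is honoured and
         * rounded up to a power of two. */
        p.flags = IORING_SETUP_CQSIZE;
        p.cq_entries = 4096;
        int fd = (int)syscall(__NR_io_uring_setup, 64, &p);
        if (fd < 0) {
            perror("io_uring_setup");
            return 1;
        }
        printf("sq_entries=%u cq_entries=%u\n", p.sq_entries, p.cq_entries);
        close(fd);
        return 0;
    }
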