Searched refs: sq_entries (Results 1 – 9 of 9, sorted by relevance)
/linux/io_uring/
register.c — all matches in io_register_resize_rings():
  421: ret = io_uring_fill_params(p.sq_entries, &p);
  426: if (p.sq_entries == ctx->sq_entries && p.cq_entries == ctx->cq_entries) {
  432: size = rings_size(p.flags, p.sq_entries, p.cq_entries,
  458: WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
  460: WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
  469: size = array_size(2 * sizeof(struct io_uring_sqe), p.sq_entries);
  471: size = array_size(sizeof(struct io_uring_sqe), p.sq_entries);
  521: if (tail - old_head > p.sq_entries)
  524: unsigned src_head = i & (ctx->sq_entries - 1);
  525: unsigned dst_head = i & (p.sq_entries - 1);
  [all …]
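For context on the masked indexing at lines 524–525: the copy loop in io_register_resize_rings() can translate one absolute submission index into a slot of either ring only because both the old and new SQ sizes are powers of two. A stand-alone sketch of that trick (hypothetical names, not the kernel code itself):

    #include <stdio.h>

    /* Hypothetical model of the resize copy loop above: entries at
     * absolute positions head..tail-1 are re-homed from a ring of
     * old_entries slots into a ring of new_entries slots. Both sizes
     * must be powers of two for the mask trick to work. */
    static void copy_ring(const int *src, unsigned old_entries,
                          int *dst, unsigned new_entries,
                          unsigned head, unsigned tail)
    {
        for (unsigned i = head; i != tail; i++) {
            unsigned src_idx = i & (old_entries - 1); /* like src_head */
            unsigned dst_idx = i & (new_entries - 1); /* like dst_head */
            dst[dst_idx] = src[src_idx];
        }
    }

    int main(void)
    {
        int old_ring[4] = { 10, 11, 12, 13 };
        int new_ring[8] = { 0 };

        /* Two pending entries at absolute positions 6 and 7 wrap into
         * slots 2 and 3 of the old ring, slots 6 and 7 of the new one. */
        copy_ring(old_ring, 4, new_ring, 8, 6, 8);
        printf("%d %d\n", new_ring[6], new_ring[7]); /* prints "12 13" */
        return 0;
    }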
io_uring.h:
  73: unsigned long rings_size(unsigned int flags, unsigned int sq_entries,  (declaration)
  318: return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;  (in io_sqring_full())
  328: return min(entries, ctx->sq_entries);  (in io_sqring_entries())
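The io_sqring_full() comparison at line 318 relies on unsigned wraparound: the head and tail counters run freely and are never masked when stored, so tail - head yields the occupancy even after the 32-bit counters overflow. A minimal stand-alone sketch (hypothetical struct, not the kernel's):

    #include <assert.h>
    #include <stdio.h>

    /* Free-running 32-bit head/tail counters, as io_uring uses: they
     * are never masked when stored, so tail - head counts the queued
     * entries even across UINT_MAX wraparound. */
    struct sq_model {
        unsigned head;
        unsigned tail;
        unsigned entries; /* power of two */
    };

    static int sq_full(const struct sq_model *sq)
    {
        return sq->tail - sq->head == sq->entries;
    }

    int main(void)
    {
        /* Counters parked across the 32-bit limit: tail has wrapped,
         * but the subtraction still reports 128 queued entries. */
        struct sq_model sq = { .head = 0xffffffc0, .tail = 0x00000040,
                               .entries = 128 };
        assert(sq_full(&sq));
        printf("occupancy: %u\n", sq.tail - sq.head); /* prints 128 */
        return 0;
    }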
io_uring.c:
  2258: unsigned mask = ctx->sq_entries - 1;  (in io_get_sqe())
  2264: if (unlikely(head >= ctx->sq_entries)) {  (in io_get_sqe())
  2273: head = array_index_nospec(head, ctx->sq_entries);  (in io_get_sqe())
  2644: unsigned long rings_size(unsigned int flags, unsigned int sq_entries,  (rings_size() parameter)
  2671: sq_array_size = array_size(sizeof(u32), sq_entries);  (in rings_size())
  3467: ctx->sq_entries = p->sq_entries;  (in io_allocate_scq_urings())
  3470: size = rings_size(ctx->flags, p->sq_entries, p->cq_entries,  (in io_allocate_scq_urings())
  3488: rings->sq_ring_mask = p->sq_entries - 1;  (in io_allocate_scq_urings())
  3490: rings->sq_ring_entries = p->sq_entries;  (in io_allocate_scq_urings())
  3494: size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);  (in io_allocate_scq_urings())
  [all …]
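rings_size() here sizes one allocation holding the shared ring header, the CQE array, and, unless the SQ index array is omitted, a u32 array of sq_entries slots. A loose userspace approximation (the struct sizes below are invented for illustration; the kernel's version uses struct_size()/array_size() so the arithmetic cannot silently overflow):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel structures; the sizes are
     * illustrative, not ABI-accurate. */
    struct fake_cqe   { uint64_t user_data; int32_t res; uint32_t flags; };
    struct fake_rings { uint32_t hdr[32]; };  /* heads, tails, masks, ... */

    /* Rough analogue of rings_size(): header plus cq_entries CQEs,
     * plus a u32 SQ index array of sq_entries slots when present. */
    static size_t rings_size_approx(unsigned sq_entries, unsigned cq_entries,
                                    int has_sq_array)
    {
        size_t size = sizeof(struct fake_rings);
        size += (size_t)cq_entries * sizeof(struct fake_cqe);
        if (has_sq_array)
            size += (size_t)sq_entries * sizeof(uint32_t);
        return size;
    }

    int main(void)
    {
        printf("%zu bytes\n", rings_size_approx(128, 256, 1));
        return 0;
    }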
tctx.c:
  42: concurrency = min(ctx->sq_entries, 4 * num_online_cpus());  (in io_init_wq_offload())
/linux/tools/include/io_uring/
mini_liburing.h — all matches in io_uring_mmap():
  75: sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned int);
  88: size = p->sq_entries * sizeof(struct io_uring_sqe);
  103: munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
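The arithmetic above is the standard liburing-style setup sequence: io_uring_setup(2) fills in p.sq_entries and the sq_off offsets, and userspace then sizes its mmap(2) regions from them. A trimmed sketch, assuming a kernel with io_uring and the classic layout that still carries the SQ index array (error handling abbreviated):

    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring_params p;
        memset(&p, 0, sizeof(p));

        /* The kernel rounds our request up and reports the real ring
         * sizes back through p.sq_entries / p.cq_entries. */
        int fd = (int)syscall(__NR_io_uring_setup, 8, &p);
        if (fd < 0)
            return 1;

        /* Same sizing as mini_liburing.h: the SQ ring mapping ends with
         * the u32 index array; the SQE array is a separate mapping. */
        size_t sq_ring_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
        size_t sqes_sz    = p.sq_entries * sizeof(struct io_uring_sqe);

        void *sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
        void *sqes    = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
        if (sq_ring == MAP_FAILED || sqes == MAP_FAILED)
            return 1;

        printf("sq_entries=%u sq_ring_sz=%zu sqes_sz=%zu\n",
               p.sq_entries, sq_ring_sz, sqes_sz);
        return 0;
    }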
/linux/drivers/net/ethernet/ibm/ehea/
ehea_main.c:
  53: static int sq_entries = EHEA_DEF_ENTRIES_SQ;  (variable)
  61: module_param(sq_entries, int, 0);
  77: MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
  2255: pr_cfg.max_entries_scq = sq_entries * 2;  (in ehea_port_res_setup())
  2256: pr_cfg.max_entries_sq = sq_entries;  (in ehea_port_res_setup())
  2262: pr_cfg_small_rx.max_entries_scq = sq_entries;  (in ehea_port_res_setup())
  2263: pr_cfg_small_rx.max_entries_sq = sq_entries;  (in ehea_port_res_setup())
  2963: port->sig_comp_iv = sq_entries / 10;  (in ehea_setup_single_port())
  3514: if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||  (in check_module_parm())
  3515: (sq_entries > EHEA_MAX_ENTRIES_SQ)) {  (in check_module_parm())
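The ehea hits show the other common use of the name: a module parameter whose value is range-checked at load time (lines 3514–3515). A minimal sketch of that pattern for a hypothetical module (the DEMO_* bounds and names are invented, standing in for EHEA_MIN_ENTRIES_QP / EHEA_MAX_ENTRIES_SQ):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/module.h>

    /* Hypothetical bounds, standing in for the EHEA_* limits above. */
    #define DEMO_MIN_ENTRIES_SQ 64
    #define DEMO_MAX_ENTRIES_SQ 32767

    static int sq_entries = 4096;
    module_param(sq_entries, int, 0);
    MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue");

    static int __init demo_init(void)
    {
        /* Reject out-of-range values before touching any hardware. */
        if (sq_entries < DEMO_MIN_ENTRIES_SQ ||
            sq_entries > DEMO_MAX_ENTRIES_SQ) {
            pr_err("demo: sq_entries %d out of range [%d, %d]\n",
                   sq_entries, DEMO_MIN_ENTRIES_SQ, DEMO_MAX_ENTRIES_SQ);
            return -EINVAL;
        }
        return 0;
    }

    static void __exit demo_exit(void) { }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");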
/linux/tools/include/uapi/linux/
io_uring.h:
  486: __u32 sq_entries;  (struct member)
/linux/include/linux/
io_uring_types.h:
  276: unsigned sq_entries;  (struct member)
/linux/include/uapi/linux/
io_uring.h:
  549: __u32 sq_entries;  (struct member)
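Taken together, the three member declarations are views of one value: userspace asks for an entry count through struct io_uring_params (the uapi header, mirrored under tools/), and the kernel rounds it up to a power of two, stores it in its private io_ring_ctx, and reports it back. A short sketch of observing that round-up (assumes a kernel with io_uring support):

    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring_params p;
        memset(&p, 0, sizeof(p));

        /* Ask for 100 SQEs; the kernel rounds the SQ size up to the
         * next power of two and writes it back into p.sq_entries. */
        int fd = (int)syscall(__NR_io_uring_setup, 100, &p);
        if (fd < 0) {
            perror("io_uring_setup");
            return 1;
        }
        printf("requested 100, got sq_entries=%u cq_entries=%u\n",
               p.sq_entries, p.cq_entries); /* typically 128 and 256 */
        close(fd);
        return 0;
    }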