Searched refs:sqe (Results 1 – 25 of 72) sorted by relevance

/linux/io_uring/
fs.c
50 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_renameat_prep() argument
55 if (sqe->buf_index || sqe->splice_fd_in) in io_renameat_prep()
60 ren->old_dfd = READ_ONCE(sqe->fd); in io_renameat_prep()
61 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_renameat_prep()
62 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_renameat_prep()
63 ren->new_dfd = READ_ONCE(sqe->len); in io_renameat_prep()
64 ren->flags = READ_ONCE(sqe->rename_flags); in io_renameat_prep()
104 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_unlinkat_prep() argument
109 if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in) in io_unlinkat_prep()
114 un->dfd = READ_ONCE(sqe->fd); in io_unlinkat_prep()
[all …]
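
These prep helpers show the pattern used throughout io_uring: SQE fields the opcode does not use must be zero (the sqe->buf_index || sqe->splice_fd_in check), and every field that is used is fetched with READ_ONCE(), since the SQE sits in memory shared with userspace. On the submission side the same fields are filled in before the kernel sees them; a minimal userspace sketch using liburing (an assumption of this example; error handling trimmed):

/* Submit IORING_OP_RENAMEAT: liburing places olddfd/oldpath/newdfd/
 * newpath/flags into sqe->fd/addr/len/addr2/rename_flags, the exact
 * fields io_renameat_prep() reads above. */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt",
			       AT_FDCWD, "new.txt", 0);

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("rename: %d\n", cqe->res);	/* 0 or -errno */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
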
sync.c
25 int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
29 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in)) in io_sfr_prep()
32 sync->off = READ_ONCE(sqe->off); in io_sfr_prep()
33 sync->len = READ_ONCE(sqe->len); in io_sfr_prep()
34 sync->flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
53 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
57 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in)) in io_fsync_prep()
60 sync->flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
64 sync->off = READ_ONCE(sqe->off); in io_fsync_prep()
65 sync->len = READ_ONCE(sqe->len); in io_fsync_prep()
[all …]
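
Both sync opcodes follow the same shape: io_sfr_prep() pulls offset, length, and flags from sqe->off/len/sync_range_flags, while io_fsync_prep() takes fsync_flags plus an optional off/len range. A hedged liburing counterpart (the queue_* helper names are illustrative):

#include <liburing.h>

/* Datasync-only fsync: sets the IORING_FSYNC_DATASYNC bit that
 * io_fsync_prep() reads from sqe->fsync_flags. */
static void queue_fdatasync(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
}

/* sync_file_range(): nbytes and offset land in sqe->len and sqe->off,
 * matching the reads in io_sfr_prep(). */
static void queue_sync_range(struct io_uring *ring, int fd,
			     __u64 offset, unsigned int nbytes)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_sync_file_range(sqe, fd, nbytes, offset, 0);
}
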
openclose.c
50 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
56 if (unlikely(sqe->buf_index)) in __io_openat_prep()
65 open->dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
66 fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); in __io_openat_prep()
74 open->file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
85 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
88 u64 mode = READ_ONCE(sqe->len); in io_openat_prep()
89 u64 flags = READ_ONCE(sqe->open_flags); in io_openat_prep()
92 return __io_openat_prep(req, sqe); in io_openat_prep()
95 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
[all …]
advise.c
31 int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
36 if (sqe->buf_index || sqe->splice_fd_in) in io_madvise_prep()
39 ma->addr = READ_ONCE(sqe->addr); in io_madvise_prep()
40 ma->len = READ_ONCE(sqe->off); in io_madvise_prep()
42 ma->len = READ_ONCE(sqe->len); in io_madvise_prep()
43 ma->advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
79 int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
83 if (sqe->buf_index || sqe->splice_fd_in) in io_fadvise_prep()
86 fa->offset = READ_ONCE(sqe->off); in io_fadvise_prep()
87 fa->len = READ_ONCE(sqe->addr); in io_fadvise_prep()
[all …]
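
One quirk worth noting: both opcodes share sqe->fadvise_advice, but fadvise carries its 64-bit length in the otherwise unused sqe->addr (line 87 above). A small hedged sketch with liburing (queue_advice is an illustrative name):

#include <fcntl.h>
#include <liburing.h>
#include <sys/mman.h>

/* Queue access-pattern hints without a syscall per hint; liburing
 * fills the same SQE fields io_madvise_prep()/io_fadvise_prep()
 * decode above. */
static void queue_advice(struct io_uring *ring, void *buf, size_t len,
			 int fd, __u64 off)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_madvise(sqe, buf, len, MADV_SEQUENTIAL);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fadvise(sqe, fd, off, len, POSIX_FADV_SEQUENTIAL);
}
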
truncate.c
23 int io_ftruncate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_ftruncate_prep() argument
27 if (sqe->rw_flags || sqe->addr || sqe->len || sqe->buf_index || in io_ftruncate_prep()
28 sqe->splice_fd_in || sqe->addr3) in io_ftruncate_prep()
31 ft->len = READ_ONCE(sqe->off); in io_ftruncate_prep()
net.c
91 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_shutdown_prep() argument
95 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags || in io_shutdown_prep()
96 sqe->buf_index || sqe->splice_fd_in)) in io_shutdown_prep()
99 shutdown->how = READ_ONCE(sqe->len); in io_shutdown_prep()
349 static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_send_setup() argument
357 sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_send_setup()
359 if (READ_ONCE(sqe->__pad3[0])) in io_send_setup()
368 addr = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_send_setup()
369 addr_len = READ_ONCE(sqe->addr_len); in io_send_setup()
386 static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_setup() argument
[all …]
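
io_send_setup() shows how a plain send gains datagram support: an optional destination address arrives via sqe->addr2 with its length in sqe->addr_len. With liburing that pair is set by io_uring_prep_send_set_addr(); a hedged sketch (assumes a kernel recent enough to accept an address on IORING_OP_SEND):

#include <liburing.h>
#include <netinet/in.h>

/* Send to an explicit destination; the sockaddr pointer and length
 * land in sqe->addr2/addr_len, the fields io_send_setup() reads. */
static void queue_send_to(struct io_uring *ring, int sockfd,
			  const void *buf, size_t len,
			  const struct sockaddr_in *dst)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_send(sqe, sockfd, buf, len, 0);
	io_uring_prep_send_set_addr(sqe, (const struct sockaddr *)dst,
				    sizeof(*dst));
}
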
uring_cmd.c
32 ioucmd->sqe = NULL; in io_req_uring_cleanup()
169 const struct io_uring_sqe *sqe) in io_uring_cmd_prep_setup() argument
186 memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx)); in io_uring_cmd_prep_setup()
187 ioucmd->sqe = cache->sqes; in io_uring_cmd_prep_setup()
191 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_uring_cmd_prep() argument
195 if (sqe->__pad1) in io_uring_cmd_prep()
198 ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags); in io_uring_cmd_prep()
205 u16 index = READ_ONCE(sqe->buf_index); in io_uring_cmd_prep()
217 ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); in io_uring_cmd_prep()
219 return io_uring_cmd_prep_setup(req, sqe); in io_uring_cmd_prep()
[all …]
futex.c
164 int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_futex_prep() argument
169 if (unlikely(sqe->len || sqe->futex_flags || sqe->buf_index || in io_futex_prep()
170 sqe->file_index)) in io_futex_prep()
173 iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_futex_prep()
174 iof->futex_val = READ_ONCE(sqe->addr2); in io_futex_prep()
175 iof->futex_mask = READ_ONCE(sqe->addr3); in io_futex_prep()
176 flags = READ_ONCE(sqe->fd); in io_futex_prep()
207 int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_futexv_prep() argument
214 if (unlikely(sqe->fd || sqe->buf_index || sqe->file_index || in io_futexv_prep()
215 sqe->addr2 || sqe->futex_flags || sqe->addr3)) in io_futexv_prep()
[all …]
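
io_futex_prep() packs the futex2 interface into one SQE: uaddr in sqe->addr, the expected value in sqe->addr2, the wait mask in sqe->addr3, and the futex2 flags reusing sqe->fd. A hedged sketch via liburing's futex helpers (needs liburing >= 2.5 and a v6.7+ kernel; queue_futex_wait is an illustrative name):

#include <linux/futex.h>
#include <liburing.h>
#include <stdint.h>

/* Futex wait as a CQE: the request waits (asynchronously) while
 * *uaddr == expected and completes when a wake arrives. */
static void queue_futex_wait(struct io_uring *ring, uint32_t *uaddr,
			     uint64_t expected)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_futex_wait(sqe, uaddr, expected,
				 FUTEX_BITSET_MATCH_ANY,
				 FUTEX2_SIZE_U32, 0);
}
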
epoll.c
24 int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_epoll_ctl_prep() argument
28 if (sqe->buf_index || sqe->splice_fd_in) in io_epoll_ctl_prep()
31 epoll->epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
32 epoll->op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
33 epoll->fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
38 ev = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_epoll_ctl_prep()
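
io_epoll_ctl_prep() spreads epoll_ctl(2)'s four arguments across the SQE: epfd in sqe->fd, op in sqe->len, the target fd in sqe->off, and the event pointer in sqe->addr. With liburing (a hedged sketch; the epoll_event must stay live until completion, hence static here):

#include <liburing.h>
#include <sys/epoll.h>

/* Async EPOLL_CTL_ADD through the ring rather than a syscall. */
static void queue_epoll_add(struct io_uring *ring, int epfd, int fd)
{
	static struct epoll_event ev;
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	ev.events = EPOLLIN;
	ev.data.fd = fd;
	io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
}
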
fs.h
3 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
15 int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
18 int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
xattr.h
5 int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
8 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
14 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
openclose.h
6 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
10 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
13 int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
16 int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
timeout.c
439 int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_timeout_remove_prep() argument
445 if (sqe->buf_index || sqe->len || sqe->splice_fd_in) in io_timeout_remove_prep()
449 tr->addr = READ_ONCE(sqe->addr); in io_timeout_remove_prep()
450 tr->flags = READ_ONCE(sqe->timeout_flags); in io_timeout_remove_prep()
458 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2))) in io_timeout_remove_prep()
509 const struct io_uring_sqe *sqe, in __io_timeout_prep() argument
515 u32 off = READ_ONCE(sqe->off); in __io_timeout_prep()
517 if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in) in __io_timeout_prep()
521 flags = READ_ONCE(sqe->timeout_flags); in __io_timeout_prep()
553 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) in __io_timeout_prep()
[all …]
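
__io_timeout_prep() insists on sqe->len == 1 (exactly one timespec, pointed to by sqe->addr), and io_timeout_remove_prep() identifies the victim by the user_data stored in sqe->addr. A hedged liburing sketch of arming and then cancelling a timeout:

#include <liburing.h>

/* Arm a 1-second timeout tagged 0xfeed, then queue a remove that
 * cancels it by that tag; both ride the SQE fields decoded above. */
static void queue_timeout_pair(struct io_uring *ring)
{
	static struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_timeout(sqe, &ts, 0, 0);
	sqe->user_data = 0xfeed;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_timeout_remove(sqe, 0xfeed, 0);
}
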
nop.c
26 int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_nop_prep() argument
30 nop->flags = READ_ONCE(sqe->nop_flags); in io_nop_prep()
35 nop->result = READ_ONCE(sqe->len); in io_nop_prep()
39 nop->fd = READ_ONCE(sqe->fd); in io_nop_prep()
43 nop->buffer = READ_ONCE(sqe->buf_index); in io_nop_prep()
rw.c
270 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
278 rw->kiocb.ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
280 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
282 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
300 rw->addr = READ_ONCE(sqe->addr); in io_prep_rw()
301 rw->len = READ_ONCE(sqe->len); in io_prep_rw()
302 rw->flags = READ_ONCE(sqe->rw_flags); in io_prep_rw()
308 attr_type_mask = READ_ONCE(sqe->attr_type_mask); in io_prep_rw()
316 attr_ptr = READ_ONCE(sqe->attr_ptr); in io_prep_rw()
322 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read() argument
[all …]
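
io_prep_rw() is the shared front end for all read/write variants: position from sqe->off, buffer and length from sqe->addr/len, ioprio and rw_flags on top, plus the newer attr_type_mask/attr_ptr pair. The simplest userspace mapping, as a hedged liburing sketch:

#include <liburing.h>

/* Read nbytes at the given file offset; buf/nbytes/offset become
 * sqe->addr/len/off. Passing -1 as the offset uses (and advances)
 * the current file position instead. */
static void queue_read(struct io_uring *ring, int fd, void *buf,
		       unsigned int nbytes, __u64 offset)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_read(sqe, fd, buf, nbytes, offset);
}
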
/linux/drivers/crypto/hisilicon/zip/
zip_crypto.c
71 void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
72 void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
73 void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
74 void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
75 void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
76 void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
77 u32 (*get_tag)(struct hisi_zip_sqe *sqe);
78 u32 (*get_status)(struct hisi_zip_sqe *sqe);
79 u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
148 static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) in hisi_zip_fill_addr() argument
[all …]
/linux/drivers/infiniband/sw/siw/
siw_qp.c
278 wqe->sqe.flags = 0; in siw_qp_mpa_rts()
279 wqe->sqe.num_sge = 1; in siw_qp_mpa_rts()
280 wqe->sqe.sge[0].length = 0; in siw_qp_mpa_rts()
281 wqe->sqe.sge[0].laddr = 0; in siw_qp_mpa_rts()
282 wqe->sqe.sge[0].lkey = 0; in siw_qp_mpa_rts()
287 wqe->sqe.rkey = 1; in siw_qp_mpa_rts()
288 wqe->sqe.raddr = 0; in siw_qp_mpa_rts()
292 wqe->sqe.opcode = SIW_OP_WRITE; in siw_qp_mpa_rts()
296 wqe->sqe.opcode = SIW_OP_READ; in siw_qp_mpa_rts()
303 siw_read_to_orq(rreq, &wqe->sqe); in siw_qp_mpa_rts()
[all …]
siw_qp_tx.c
52 struct siw_sge *sge = &wqe->sqe.sge[0]; in siw_try_1seg()
55 if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1) in siw_try_1seg()
62 memcpy(paddr, &wqe->sqe.sge[1], bytes); in siw_try_1seg()
135 c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey); in siw_qp_prepare_tx()
137 cpu_to_be64(wqe->sqe.sge[0].laddr); in siw_qp_prepare_tx()
138 c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
139 c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr); in siw_qp_prepare_tx()
140 c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length); in siw_qp_prepare_tx()
183 c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
195 c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
[all …]
siw_verbs.c
655 struct siw_sqe *sqe) in siw_copy_inline_sgl() argument
658 void *kbuf = &sqe->sge[1]; in siw_copy_inline_sgl()
661 sqe->sge[0].laddr = (uintptr_t)kbuf; in siw_copy_inline_sgl()
662 sqe->sge[0].lkey = 0; in siw_copy_inline_sgl()
680 sqe->sge[0].length = max(bytes, 0); in siw_copy_inline_sgl()
681 sqe->num_sge = bytes > 0 ? 1 : 0; in siw_copy_inline_sgl()
693 struct siw_sqe sqe = {}; in siw_sq_flush_wr() local
697 sqe.opcode = SIW_OP_WRITE; in siw_sq_flush_wr()
700 sqe.opcode = SIW_OP_READ; in siw_sq_flush_wr()
703 sqe.opcode = SIW_OP_READ_LOCAL_INV; in siw_sq_flush_wr()
[all …]
/linux/include/net/libeth/
tx.h
96 static inline void libeth_tx_complete(struct libeth_sqe *sqe, in libeth_tx_complete() argument
99 switch (sqe->type) { in libeth_tx_complete()
105 dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma), in libeth_tx_complete()
106 dma_unmap_len(sqe, len), DMA_TO_DEVICE); in libeth_tx_complete()
112 switch (sqe->type) { in libeth_tx_complete()
114 cp->ss->packets += sqe->packets; in libeth_tx_complete()
115 cp->ss->bytes += sqe->bytes; in libeth_tx_complete()
117 napi_consume_skb(sqe->skb, cp->napi); in libeth_tx_complete()
120 kfree(sqe->raw); in libeth_tx_complete()
126 sqe->type = LIBETH_SQE_EMPTY; in libeth_tx_complete()
/linux/tools/include/io_uring/
mini_liburing.h
230 static inline void io_uring_prep_cmd(struct io_uring_sqe *sqe, int op, in io_uring_prep_cmd() argument
236 memset(sqe, 0, sizeof(*sqe)); in io_uring_prep_cmd()
237 sqe->opcode = (__u8)IORING_OP_URING_CMD; in io_uring_prep_cmd()
238 sqe->fd = sockfd; in io_uring_prep_cmd()
239 sqe->cmd_op = op; in io_uring_prep_cmd()
241 sqe->level = level; in io_uring_prep_cmd()
242 sqe->optname = optname; in io_uring_prep_cmd()
243 sqe->optval = (unsigned long long)optval; in io_uring_prep_cmd()
244 sqe->optlen = optlen; in io_uring_prep_cmd()
258 static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd, in io_uring_prep_send() argument
[all …]
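
The helper above is the selftests' stand-in for liburing: it zeroes the SQE, selects IORING_OP_URING_CMD, and stashes the socket-option arguments in the SQE's command area. A hedged usage sketch (queue_getsockopt is an illustrative name; SOCKET_URING_OP_GETSOCKOPT is the uapi socket command constant):

#include <sys/socket.h>

/* Fetch SO_RCVBUF through the ring; the result arrives as a CQE
 * rather than a getsockopt() syscall return. */
static void queue_getsockopt(struct io_uring *ring, int sockfd,
			     int *val)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_cmd(sqe, SOCKET_URING_OP_GETSOCKOPT, sockfd,
			  SOL_SOCKET, SO_RCVBUF, val, sizeof(*val));
}
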
/linux/drivers/net/ethernet/qlogic/qed/
qed_nvmetcp_fw_funcs.c
68 if (!task_params->sqe) in init_sqe()
71 memset(task_params->sqe, 0, sizeof(*task_params->sqe)); in init_sqe()
72 task_params->sqe->task_id = cpu_to_le16(task_params->itid); in init_sqe()
79 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
81 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
94 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges); in init_sqe()
95 SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size); in init_sqe()
99 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
101 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
106 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
[all …]
/linux/drivers/scsi/qedf/
drv_fcoe_fw_funcs.c
13 memset(task_params->sqe, 0, sizeof(*(task_params->sqe))); in init_common_sqe()
14 SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, in init_common_sqe()
16 task_params->sqe->task_id = task_params->itid; in init_common_sqe()
167 task_params->sqe->additional_info_union.burst_length = in init_initiator_midpath_unsolicited_fcoe_task()
169 SET_FIELD(task_params->sqe->flags, in init_initiator_midpath_unsolicited_fcoe_task()
171 SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE, in init_initiator_midpath_unsolicited_fcoe_task()
193 task_params->sqe->additional_info_union.seq_rec_updated_offset = in init_initiator_sequence_recovery_fcoe_task()
/linux/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
96 struct io_uring_sqe *sqe; in do_tx() local
132 sqe = io_uring_get_sqe(&ring); in do_tx()
135 io_uring_prep_send(sqe, fd, payload, in do_tx()
137 sqe->user_data = NONZC_TAG; in do_tx()
139 io_uring_prep_sendzc(sqe, fd, payload, in do_tx()
143 sqe->ioprio |= IORING_RECVSEND_FIXED_BUF; in do_tx()
144 sqe->buf_index = buf_idx; in do_tx()
146 sqe->user_data = ZC_TAG; in do_tx()
/linux/tools/testing/vsock/
vsock_uring_test.c
62 struct io_uring_sqe *sqe; in vsock_io_uring_client() local
89 sqe = io_uring_get_sqe(&ring); in vsock_io_uring_client()
92 io_uring_prep_sendmsg_zc(sqe, fd, &msg, 0); in vsock_io_uring_client()
94 io_uring_prep_sendmsg(sqe, fd, &msg, 0); in vsock_io_uring_client()
143 struct io_uring_sqe *sqe; in vsock_io_uring_server() local
147 sqe = io_uring_get_sqe(&ring); in vsock_io_uring_server()
151 io_uring_prep_readv(sqe, fd, &iovec, 1, 0); in vsock_io_uring_server()
