Lines Matching full:req (io_uring/net.c)
124 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
126 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
133 req->flags |= REQ_F_FORCE_ASYNC;
137 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
139 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
145 sock = sock_from_file(req->file);
150 io_req_set_res(req, ret, 0);
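The io_shutdown_prep()/io_shutdown() pair backs IORING_OP_SHUTDOWN: prep marks the request REQ_F_FORCE_ASYNC and issue resolves the socket with sock_from_file() before shutting it down. A minimal liburing sketch of driving it from userspace (a hedged example, assuming liburing 2.2+ and a connected sockfd):

    #include <errno.h>
    #include <liburing.h>
    #include <sys/socket.h>

    /* Queue a shutdown of the write side of sockfd. */
    static int queue_shutdown(struct io_uring *ring, int sockfd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -ENOBUFS;
        io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
        return io_uring_submit(ring);
    }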
167 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
169 struct io_async_msghdr *hdr = req->async_data;
182 if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
183 io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
186 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
188 struct io_ring_ctx *ctx = req->ctx;
191 hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
197 req->flags |= REQ_F_NEED_CLEANUP;
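io_msg_alloc_async() pulls a struct io_async_msghdr from the per-ring ctx->netmsg_cache and tags the request REQ_F_NEED_CLEANUP; io_netmsg_recycle() later pushes it back with io_alloc_cache_put() and clears the async data. A stand-alone sketch of the same recycle pattern (illustrative names only, not the kernel's API):

    #include <stdlib.h>

    #define CACHE_MAX 32

    struct obj_cache {
        void *slots[CACHE_MAX];
        int nr;
    };

    /* Pop a cached object, falling back to the allocator. */
    static void *cache_get(struct obj_cache *c, size_t size)
    {
        return c->nr ? c->slots[--c->nr] : malloc(size);
    }

    /* Push back for reuse; free only once the cache is full. */
    static void cache_put(struct obj_cache *c, void *obj)
    {
        if (c->nr < CACHE_MAX)
            c->slots[c->nr++] = obj;
        else
            free(obj);
    }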
201 static inline void io_mshot_prep_retry(struct io_kiocb *req,
204 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
206 req->flags &= ~REQ_F_BL_EMPTY;
212 static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
228 &iomsg->msg.msg_iter, io_is_compat(req->ctx));
233 req->flags |= REQ_F_NEED_CLEANUP;
239 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
244 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
256 if (req->flags & REQ_F_BUFFER_SELECT) {
290 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
294 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
301 if (io_is_compat(req->ctx)) {
304 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr);
326 if (req->flags & REQ_F_BUFFER_SELECT) {
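io_msg_copy_hdr(), with io_compat_msg_copy_hdr() for compat tasks, copies the user-supplied message header into the kernel; ITER_SOURCE vs ITER_DEST picks the direction, and REQ_F_BUFFER_SELECT callers are restricted to a single iovec. What gets copied is an ordinary sendmsg(2)-style header, e.g. (sketch):

    #include <string.h>
    #include <sys/socket.h>

    static void fill_msghdr(struct msghdr *msg, struct iovec *iov,
                            void *buf, size_t len)
    {
        memset(msg, 0, sizeof(*msg));
        iov->iov_base = buf;
        iov->iov_len = len;
        msg->msg_iov = iov;   /* imported via io_net_import_vec() */
        msg->msg_iovlen = 1;  /* must be 1 with IOSQE_BUFFER_SELECT */
    }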
343 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
345 struct io_async_msghdr *io = req->async_data;
350 static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
352 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
353 struct io_async_msghdr *kmsg = req->async_data;
379 req->flags |= REQ_F_IMPORT_BUFFER;
382 if (req->flags & REQ_F_BUFFER_SELECT)
386 return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
391 static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
393 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
394 struct io_async_msghdr *kmsg = req->async_data;
399 ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
407 return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
410 if (req->flags & REQ_F_BUFFER_SELECT)
412 return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
417 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
419 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
428 req->flags |= REQ_F_NOWAIT;
429 if (req->flags & REQ_F_BUFFER_SELECT)
430 sr->buf_group = req->buf_index;
432 if (req->opcode == IORING_OP_SENDMSG)
435 req->flags |= REQ_F_MULTISHOT;
438 if (io_is_compat(req->ctx))
441 if (unlikely(!io_msg_alloc_async(req)))
443 if (req->opcode != IORING_OP_SENDMSG)
444 return io_send_setup(req, sqe);
447 return io_sendmsg_setup(req, sqe);
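io_sendmsg_prep() allocates the async header up front with io_msg_alloc_async() and then routes IORING_OP_SEND to io_send_setup() and IORING_OP_SENDMSG to io_sendmsg_setup(), so the SQE is fully consumed at prep time. Submitting the SENDMSG variant from userspace is the usual liburing pattern (sketch; ring, sockfd and msg assumed set up as above):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
    io_uring_sqe_set_data64(sqe, 0xfeed);   /* tag to match the CQE later */
    io_uring_submit(&ring);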
450 static void io_req_msg_cleanup(struct io_kiocb *req,
453 io_netmsg_recycle(req, issue_flags);
495 static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl,
498 req->flags |= REQ_F_BL_NO_RECYCLE;
499 if (req->flags & REQ_F_BUFFERS_COMMIT)
500 io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len));
504 static inline bool io_send_finish(struct io_kiocb *req,
508 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
513 cflags = io_put_kbuf(req, sel->val, sel->buf_list);
517 cflags = io_put_kbufs(req, sel->val, sel->buf_list, io_bundle_nbufs(kmsg, sel->val));
519 if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
526 if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
527 io_mshot_prep_retry(req, kmsg);
533 io_req_set_res(req, sel->val, cflags);
538 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
540 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
541 struct io_async_msghdr *kmsg = req->async_data;
547 sock = sock_from_file(req->file);
551 if (!(req->flags & REQ_F_POLLED) &&
576 req_set_fail(req);
578 io_req_msg_cleanup(req, issue_flags);
583 io_req_set_res(req, ret, 0);
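io_sendmsg() performs the actual send, arming poll-driven retry when the socket isn't ready (unless REQ_F_POLLED is already set) and folding partial progress into the final result. Userspace sees exactly one CQE whose res is the byte count or a negative errno (fragment, assuming the ring above):

    struct io_uring_cqe *cqe;

    if (!io_uring_wait_cqe(&ring, &cqe)) {
        if (cqe->res < 0)
            fprintf(stderr, "sendmsg: %s\n", strerror(-cqe->res));
        else
            printf("sent %d bytes\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);
    }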
587 static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
590 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
610 ret = io_buffers_select(req, &arg, sel, issue_flags);
617 req->flags |= REQ_F_NEED_CLEANUP;
635 int io_send(struct io_kiocb *req, unsigned int issue_flags)
637 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
638 struct io_async_msghdr *kmsg = req->async_data;
645 sock = sock_from_file(req->file);
649 if (!(req->flags & REQ_F_POLLED) &&
659 if (io_do_buffer_select(req)) {
660 ret = io_send_select_buffer(req, issue_flags, &sel, kmsg);
684 return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
688 req_set_fail(req);
696 if (!io_send_finish(req, kmsg, &sel))
699 io_req_msg_cleanup(req, issue_flags);
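io_send() adds provided-buffer support on top: io_do_buffer_select() routes through io_send_select_buffer(), and bundles (io_bundle_nbufs()) let one SQE consume several buffers from the group. A hedged userspace sketch of a bundled, provided-buffer send (kernel 6.10+; BGID is an assumed buffer-ring group id registered elsewhere):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_send(sqe, sockfd, NULL, 0, 0);  /* data comes from the group */
    sqe->flags |= IOSQE_BUFFER_SELECT;
    sqe->buf_group = BGID;
    sqe->ioprio |= IORING_RECVSEND_BUNDLE;        /* allow multi-buffer bundles */
    io_uring_submit(&ring);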
703 static int io_recvmsg_mshot_prep(struct io_kiocb *req,
707 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
727 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
733 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
737 if (!(req->flags & REQ_F_BUFFER_SELECT)) {
738 ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
743 return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
747 static int io_recvmsg_prep_setup(struct io_kiocb *req)
749 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
752 kmsg = io_msg_alloc_async(req);
756 if (req->opcode == IORING_OP_RECV) {
766 if (req->flags & REQ_F_BUFFER_SELECT)
772 return io_recvmsg_copy_hdr(req, kmsg);
778 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
780 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
794 req->flags |= REQ_F_NOWAIT;
796 req->flags |= REQ_F_CLEAR_POLLIN;
797 if (req->flags & REQ_F_BUFFER_SELECT)
798 sr->buf_group = req->buf_index;
801 if (!(req->flags & REQ_F_BUFFER_SELECT))
805 if (req->opcode == IORING_OP_RECV) {
813 req->flags |= REQ_F_APOLL_MULTISHOT;
819 if (req->opcode == IORING_OP_RECVMSG)
823 if (io_is_compat(req->ctx))
827 return io_recvmsg_prep_setup(req);
839 static inline bool io_recv_finish(struct io_kiocb *req,
844 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
866 cflags |= io_put_kbufs(req, this_ret, sel->buf_list, io_bundle_nbufs(kmsg, this_ret));
868 cflags = req->cqe.flags | (cflags & CQE_F_MASK);
872 if (req->flags & REQ_F_BL_EMPTY)
881 req->cqe.flags = cflags & ~CQE_F_MASK;
888 cflags |= io_put_kbuf(req, sel->val, sel->buf_list);
895 if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
896 io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
898 io_mshot_prep_retry(req, kmsg);
916 io_req_set_res(req, sel->val, cflags);
918 io_req_msg_cleanup(req, issue_flags);
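io_recv_finish() is where multishot receive either continues (io_req_post_cqe() with IORING_CQE_F_MORE plus io_mshot_prep_retry()) or terminates and posts the final result. The userspace mirror is a reap loop that keeps going while F_MORE is set and recovers the provided-buffer id from cqe->flags (sketch):

    struct io_uring_cqe *cqe;

    for (;;) {
        if (io_uring_wait_cqe(&ring, &cqe))
            break;
        if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
            unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
            /* consume cqe->res bytes from provided buffer bid here */
        }
        int more = cqe->flags & IORING_CQE_F_MORE;
        io_uring_cqe_seen(&ring, cqe);
        if (!more)
            break;   /* multishot ended (error, EOF, or buffers ran dry) */
    }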
1006 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
1008 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1009 struct io_async_msghdr *kmsg = req->async_data;
1017 sock = sock_from_file(req->file);
1021 if (!(req->flags & REQ_F_POLLED) &&
1031 if (io_do_buffer_select(req)) {
1034 sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
1038 if (req->flags & REQ_F_APOLL_MULTISHOT) {
1041 io_kbuf_recycle(req, sel.buf_list, issue_flags);
1051 if (req->flags & REQ_F_APOLL_MULTISHOT) {
1065 io_kbuf_recycle(req, sel.buf_list, issue_flags);
1070 return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
1074 req_set_fail(req);
1076 req_set_fail(req);
1084 io_kbuf_recycle(req, sel.buf_list, issue_flags);
1087 if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
1093 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
1096 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1127 ret = io_buffers_peek(req, &arg, sel);
1134 req->flags |= REQ_F_NEED_CLEANUP;
1150 *sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
1165 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
1167 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1168 struct io_async_msghdr *kmsg = req->async_data;
1176 if (!(req->flags & REQ_F_POLLED) &&
1180 sock = sock_from_file(req->file);
1190 if (io_do_buffer_select(req)) {
1192 ret = io_recv_buf_select(req, kmsg, &sel, issue_flags);
1209 io_kbuf_recycle(req, sel.buf_list, issue_flags);
1216 return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
1220 req_set_fail(req);
1223 req_set_fail(req);
1232 io_kbuf_recycle(req, sel.buf_list, issue_flags);
1235 if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
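io_recv() is the plain-buffer receive path; with REQ_F_APOLL_MULTISHOT it cycles through io_recv_finish() above until the buffer group runs empty or the socket errors. Arming a multishot receive against a buffer ring (liburing 2.3+ sketch, BGID as before):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
    sqe->flags |= IOSQE_BUFFER_SELECT;
    sqe->buf_group = BGID;   /* group set up via io_uring_setup_buf_ring() */
    io_uring_submit(&ring);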
1241 int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1243 struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
1250 zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx);
1265 req->flags |= REQ_F_APOLL_MULTISHOT;
1270 int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
1272 struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
1277 if (!(req->flags & REQ_F_POLLED) &&
1281 sock = sock_from_file(req->file);
1286 ret = io_zcrx_recv(req, zc->ifq, sock, zc->msg_flags | MSG_DONTWAIT,
1289 io_req_set_res(req, 0, 0);
1299 req_set_fail(req);
1300 io_req_set_res(req, ret, 0);
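io_recvzc_prep()/io_recvzc() implement zero-copy receive: prep resolves a previously registered interface queue from ctx->zcrx_ctxs and the request runs implicitly multishot (REQ_F_APOLL_MULTISHOT). Raw-SQE sketch under two stated assumptions: an ifq is already registered at index 0, and the 6.15-era UAPI names below are correct:

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_RECV_ZC;
    sqe->fd = sockfd;
    sqe->zcrx_ifq_idx = 0;   /* assumed: index of the registered zcrx ifq */
    io_uring_submit(&ring);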
1306 void io_send_zc_cleanup(struct io_kiocb *req)
1308 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1309 struct io_async_msghdr *io = req->async_data;
1311 if (req_has_async_data(req))
1323 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1325 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1326 struct io_ring_ctx *ctx = req->ctx;
1336 if (req->flags & REQ_F_CQE_SKIP)
1342 notif->cqe.user_data = req->cqe.user_data;
1345 req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;
1362 req->buf_index = READ_ONCE(sqe->buf_index);
1364 req->flags |= REQ_F_NOWAIT;
1366 if (io_is_compat(req->ctx))
1369 iomsg = io_msg_alloc_async(req);
1373 if (req->opcode == IORING_OP_SEND_ZC) {
1374 ret = io_send_setup(req, sqe);
1378 ret = io_sendmsg_setup(req, sqe);
1441 static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
1443 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1444 struct io_async_msghdr *kmsg = req->async_data;
1448 sr->notif->buf_index = req->buf_index;
1454 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1456 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1457 struct io_async_msghdr *kmsg = req->async_data;
1462 sock = sock_from_file(req->file);
1468 if (!(req->flags & REQ_F_POLLED) &&
1472 if (req->flags & REQ_F_IMPORT_BUFFER) {
1473 req->flags &= ~REQ_F_IMPORT_BUFFER;
1474 ret = io_send_zc_import(req, issue_flags);
1502 req_set_fail(req);
1517 io_req_msg_cleanup(req, 0);
1519 io_req_set_res(req, ret, IORING_CQE_F_MORE);
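io_send_zc_prep() allocates a notification request whose cqe.user_data mirrors the parent's, and io_send_zc() completes with IORING_CQE_F_MORE set: the byte count arrives first, and a second CQE flagged IORING_CQE_F_NOTIF fires once the kernel no longer references the user pages. Sketch of both halves (liburing 2.3+; buf/len assumed pinned until the notification):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    struct io_uring_cqe *cqe;

    io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
    io_uring_submit(&ring);

    io_uring_wait_cqe(&ring, &cqe);
    if (!(cqe->flags & IORING_CQE_F_NOTIF))
        printf("sent %d bytes\n", cqe->res);  /* F_MORE: notification follows */
    io_uring_cqe_seen(&ring, cqe);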
1523 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1525 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1526 struct io_async_msghdr *kmsg = req->async_data;
1531 if (req->flags & REQ_F_IMPORT_BUFFER) {
1535 ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter, req,
1539 req->flags &= ~REQ_F_IMPORT_BUFFER;
1542 sock = sock_from_file(req->file);
1548 if (!(req->flags & REQ_F_POLLED) &&
1572 req_set_fail(req);
1587 io_req_msg_cleanup(req, 0);
1589 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1593 void io_sendrecv_fail(struct io_kiocb *req)
1595 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1598 req->cqe.res = sr->done_io;
1600 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1601 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1602 req->cqe.flags |= IORING_CQE_F_MORE;
1608 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1610 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1636 req->flags |= REQ_F_APOLL_MULTISHOT;
1638 req->flags |= REQ_F_NOWAIT;
1642 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1644 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1654 if (!(req->flags & REQ_F_POLLED) &&
1666 file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
1682 ret = io_fixed_fd_install(req, issue_flags, file,
1690 if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
1691 io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
1697 io_req_set_res(req, ret, cflags);
1699 req_set_fail(req);
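io_accept() runs one-shot or multishot (REQ_F_APOLL_MULTISHOT) and can install the accepted fd straight into the fixed-file table via io_fixed_fd_install(). Multishot accept from userspace (liburing 2.3+ sketch): one SQE, then one CQE per inbound connection while IORING_CQE_F_MORE stays set:

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
    io_uring_submit(&ring);
    /* each CQE: res = accepted fd (or -errno), flags carries F_MORE */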
1703 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1705 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1724 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1726 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1745 req_set_fail(req);
1750 ret = io_fixed_fd_install(req, issue_flags, file,
1753 io_req_set_res(req, ret, 0);
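io_socket() is the asynchronous counterpart of socket(2), with the same optional fixed-fd installation. Sketch:

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_socket(sqe, AF_INET, SOCK_STREAM, 0, 0);
    io_uring_submit(&ring);   /* CQE res is the new socket fd or -errno */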
1757 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1759 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1769 io = io_msg_alloc_async(req);
1776 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1778 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1779 struct io_async_msghdr *io = req->async_data;
1787 if (vfs_poll(req->file, &pt) & EPOLLERR)
1793 ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
1815 ret = sock_error(sock_from_file(req->file)->sk);
1822 req_set_fail(req);
1823 io_req_msg_cleanup(req, issue_flags);
1824 io_req_set_res(req, ret, 0);
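io_connect() hides the nonblocking-connect dance: if the connect is still in progress it polls the file (vfs_poll()) and later extracts sock_error() so userspace gets a single final completion. Sketch (assumed AF_INET target; 192.0.2.1 is from the documentation address range):

    #include <arpa/inet.h>
    #include <netinet/in.h>

    struct sockaddr_in dst = {
        .sin_family = AF_INET,
        .sin_port = htons(80),
    };
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
    io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst, sizeof(dst));
    io_uring_submit(&ring);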
1828 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1830 struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1840 io = io_msg_alloc_async(req);
1846 int io_bind(struct io_kiocb *req, unsigned int issue_flags)
1848 struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1849 struct io_async_msghdr *io = req->async_data;
1853 sock = sock_from_file(req->file);
1859 req_set_fail(req);
1860 io_req_set_res(req, ret, 0);
1864 int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1866 struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1875 int io_listen(struct io_kiocb *req, unsigned int issue_flags)
1877 struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1881 sock = sock_from_file(req->file);
1887 req_set_fail(req);
1888 io_req_set_res(req, ret, 0);
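io_bind() and io_listen() complete the socket lifecycle, so a server can be brought up without a single blocking syscall. With a recent liburing (2.8+ sketch; addr prepared as for bind(2)), the two can even be linked so listen only runs after bind succeeds:

    struct io_uring_sqe *sqe;

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_bind(sqe, sockfd, (struct sockaddr *)&addr, sizeof(addr));
    sqe->flags |= IOSQE_IO_LINK;   /* listen below depends on bind */

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_listen(sqe, sockfd, SOMAXCONN);
    io_uring_submit(&ring);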