Lines matching the identifier req in io_uring/net.c (Linux kernel). Each entry shows the kernel source line number, the matching line, and the enclosing function; "argument" marks lines where req appears as a function parameter.
124 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_shutdown_prep() argument
126 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown); in io_shutdown_prep()
133 req->flags |= REQ_F_FORCE_ASYNC; in io_shutdown_prep()
137 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
139 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown); in io_shutdown()
145 sock = sock_from_file(req->file); in io_shutdown()
150 io_req_set_res(req, ret, 0); in io_shutdown()
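
The lines above are the IORING_OP_SHUTDOWN handler: io_shutdown_prep() forces the request async (REQ_F_FORCE_ASYNC) and io_shutdown() resolves the socket from req->file before invoking shutdown on it. A minimal userspace sketch of driving this opcode through liburing follows; uring_shutdown_write() and its error handling are illustrative, not part of the kernel source, and assume liburing >= 2.0 and a connected socket.

#include <errno.h>
#include <liburing.h>
#include <sys/socket.h>

/* Sketch: queue IORING_OP_SHUTDOWN and wait for its completion. */
static int uring_shutdown_write(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EAGAIN;		/* SQ ring full; submit and retry */
	/* equivalent to shutdown(sockfd, SHUT_WR) */
	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* 0 on success, -errno otherwise */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}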
167 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_netmsg_recycle() argument
169 struct io_async_msghdr *hdr = req->async_data; in io_netmsg_recycle()
182 if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) in io_netmsg_recycle()
183 io_req_async_data_clear(req, REQ_F_NEED_CLEANUP); in io_netmsg_recycle()
186 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req) in io_msg_alloc_async() argument
188 struct io_ring_ctx *ctx = req->ctx; in io_msg_alloc_async()
191 hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req); in io_msg_alloc_async()
197 req->flags |= REQ_F_NEED_CLEANUP; in io_msg_alloc_async()
201 static inline void io_mshot_prep_retry(struct io_kiocb *req, in io_mshot_prep_retry() argument
204 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_mshot_prep_retry()
206 req->flags &= ~REQ_F_BL_EMPTY; in io_mshot_prep_retry()
212 static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg, in io_net_import_vec() argument
228 &iomsg->msg.msg_iter, io_is_compat(req->ctx)); in io_net_import_vec()
233 req->flags |= REQ_F_NEED_CLEANUP; in io_net_import_vec()
239 static int io_compat_msg_copy_hdr(struct io_kiocb *req, in io_compat_msg_copy_hdr() argument
244 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_compat_msg_copy_hdr()
256 if (req->flags & REQ_F_BUFFER_SELECT) { in io_compat_msg_copy_hdr()
290 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg, in io_msg_copy_hdr() argument
294 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_msg_copy_hdr()
301 if (io_is_compat(req->ctx)) { in io_msg_copy_hdr()
304 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr); in io_msg_copy_hdr()
326 if (req->flags & REQ_F_BUFFER_SELECT) { in io_msg_copy_hdr()
343 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req) in io_sendmsg_recvmsg_cleanup() argument
345 struct io_async_msghdr *io = req->async_data; in io_sendmsg_recvmsg_cleanup()
350 static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_send_setup() argument
352 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_setup()
353 struct io_async_msghdr *kmsg = req->async_data; in io_send_setup()
379 req->flags |= REQ_F_IMPORT_BUFFER; in io_send_setup()
382 if (req->flags & REQ_F_BUFFER_SELECT) in io_send_setup()
386 return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); in io_send_setup()
391 static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_setup() argument
393 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg_setup()
394 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg_setup()
399 ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL); in io_sendmsg_setup()
407 return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, in io_sendmsg_setup()
410 if (req->flags & REQ_F_BUFFER_SELECT) in io_sendmsg_setup()
412 return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE); in io_sendmsg_setup()
417 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
419 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg_prep()
428 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
429 if (req->flags & REQ_F_BUFFER_SELECT) in io_sendmsg_prep()
430 sr->buf_group = req->buf_index; in io_sendmsg_prep()
432 if (req->opcode == IORING_OP_SENDMSG) in io_sendmsg_prep()
435 req->flags |= REQ_F_MULTISHOT; in io_sendmsg_prep()
438 if (io_is_compat(req->ctx)) in io_sendmsg_prep()
441 if (unlikely(!io_msg_alloc_async(req))) in io_sendmsg_prep()
443 if (req->opcode != IORING_OP_SENDMSG) in io_sendmsg_prep()
444 return io_send_setup(req, sqe); in io_sendmsg_prep()
447 return io_sendmsg_setup(req, sqe); in io_sendmsg_prep()
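
io_sendmsg_prep() does all user-memory work up front: the msghdr header copy (io_msg_copy_hdr()) and the iovec import (io_net_import_vec()) happen at prep time, so the request can later be issued or retried asynchronously without touching user memory again. From userspace the opcode is reached with io_uring_prep_sendmsg(); a hedged one-shot sketch, with uring_sendmsg() as an illustrative helper name:

#include <errno.h>
#include <liburing.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Sketch: one-shot IORING_OP_SENDMSG. msg and iov stay valid because
 * we wait for the CQE before returning. */
static int uring_sendmsg(struct io_uring *ring, int sockfd,
			 const void *buf, size_t len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_sendmsg(sqe, sockfd, &msg, MSG_NOSIGNAL);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* bytes sent or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}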
450 static void io_req_msg_cleanup(struct io_kiocb *req, in io_req_msg_cleanup() argument
453 io_netmsg_recycle(req, issue_flags); in io_req_msg_cleanup()
495 static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl, in io_net_kbuf_recyle() argument
498 req->flags |= REQ_F_BL_NO_RECYCLE; in io_net_kbuf_recyle()
499 if (req->flags & REQ_F_BUFFERS_COMMIT) in io_net_kbuf_recyle()
500 io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len)); in io_net_kbuf_recyle()
504 static inline bool io_send_finish(struct io_kiocb *req, in io_send_finish() argument
508 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_finish()
513 cflags = io_put_kbuf(req, sel->val, sel->buf_list); in io_send_finish()
517 cflags = io_put_kbufs(req, sel->val, sel->buf_list, io_bundle_nbufs(kmsg, sel->val)); in io_send_finish()
519 if (bundle_finished || req->flags & REQ_F_BL_EMPTY) in io_send_finish()
526 if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) { in io_send_finish()
527 io_mshot_prep_retry(req, kmsg); in io_send_finish()
533 io_req_set_res(req, sel->val, cflags); in io_send_finish()
538 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
540 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg()
541 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg()
547 sock = sock_from_file(req->file); in io_sendmsg()
551 if (!(req->flags & REQ_F_POLLED) && in io_sendmsg()
576 req_set_fail(req); in io_sendmsg()
578 io_req_msg_cleanup(req, issue_flags); in io_sendmsg()
583 io_req_set_res(req, ret, 0); in io_sendmsg()
587 static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags, in io_send_select_buffer() argument
590 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_select_buffer()
610 ret = io_buffers_select(req, &arg, sel, issue_flags); in io_send_select_buffer()
617 req->flags |= REQ_F_NEED_CLEANUP; in io_send_select_buffer()
635 int io_send(struct io_kiocb *req, unsigned int issue_flags) in io_send() argument
637 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send()
638 struct io_async_msghdr *kmsg = req->async_data; in io_send()
645 sock = sock_from_file(req->file); in io_send()
649 if (!(req->flags & REQ_F_POLLED) && in io_send()
659 if (io_do_buffer_select(req)) { in io_send()
660 ret = io_send_select_buffer(req, issue_flags, &sel, kmsg); in io_send()
684 return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret); in io_send()
688 req_set_fail(req); in io_send()
696 if (!io_send_finish(req, kmsg, &sel)) in io_send()
699 io_req_msg_cleanup(req, issue_flags); in io_send()
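
io_send() is the msghdr-less SEND path: io_do_buffer_select() picks provided buffers when REQ_F_BUFFER_SELECT is set, and a short transfer is parked for retry via io_net_kbuf_recyle(). The plain userspace counterpart is io_uring_prep_send(); a sketch assuming a connected stream socket (MSG_WAITALL asks the kernel to retry short sends internally):

#include <errno.h>
#include <liburing.h>
#include <sys/socket.h>

/* Sketch: IORING_OP_SEND without buffer selection. */
static int uring_send(struct io_uring *ring, int sockfd,
		      const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_send(sqe, sockfd, buf, len,
			   MSG_WAITALL | MSG_NOSIGNAL);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* bytes sent or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}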
703 static int io_recvmsg_mshot_prep(struct io_kiocb *req, in io_recvmsg_mshot_prep() argument
707 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) == in io_recvmsg_mshot_prep()
727 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
733 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr); in io_recvmsg_copy_hdr()
737 if (!(req->flags & REQ_F_BUFFER_SELECT)) { in io_recvmsg_copy_hdr()
738 ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen, in io_recvmsg_copy_hdr()
743 return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen, in io_recvmsg_copy_hdr()
747 static int io_recvmsg_prep_setup(struct io_kiocb *req) in io_recvmsg_prep_setup() argument
749 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recvmsg_prep_setup()
752 kmsg = io_msg_alloc_async(req); in io_recvmsg_prep_setup()
756 if (req->opcode == IORING_OP_RECV) { in io_recvmsg_prep_setup()
766 if (req->flags & REQ_F_BUFFER_SELECT) in io_recvmsg_prep_setup()
772 return io_recvmsg_copy_hdr(req, kmsg); in io_recvmsg_prep_setup()
778 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvmsg_prep() argument
780 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recvmsg_prep()
794 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
796 req->flags |= REQ_F_CLEAR_POLLIN; in io_recvmsg_prep()
797 if (req->flags & REQ_F_BUFFER_SELECT) in io_recvmsg_prep()
798 sr->buf_group = req->buf_index; in io_recvmsg_prep()
801 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_recvmsg_prep()
805 if (req->opcode == IORING_OP_RECV) { in io_recvmsg_prep()
813 req->flags |= REQ_F_APOLL_MULTISHOT; in io_recvmsg_prep()
819 if (req->opcode == IORING_OP_RECVMSG) in io_recvmsg_prep()
823 if (io_is_compat(req->ctx)) in io_recvmsg_prep()
827 return io_recvmsg_prep_setup(req); in io_recvmsg_prep()
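
io_recvmsg_prep() mirrors the send side (header copy plus iovec import), with the extra multishot constraints checked in io_recvmsg_mshot_prep(): multishot requires buffer selection and sets REQ_F_APOLL_MULTISHOT. A one-shot userspace sketch; the msg_name field corresponds to the iomsg->uaddr that io_msg_copy_hdr() saves above, and uring_recvmsg() is an illustrative name:

#include <errno.h>
#include <liburing.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Sketch: one-shot IORING_OP_RECVMSG; msg_name captures the sender's
 * address for unconnected sockets. */
static int uring_recvmsg(struct io_uring *ring, int sockfd,
			 void *buf, size_t len)
{
	struct sockaddr_storage src;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= &src,
		.msg_namelen	= sizeof(src),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_recvmsg(sqe, sockfd, &msg, 0);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* bytes received or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}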
839 static inline bool io_recv_finish(struct io_kiocb *req, in io_recv_finish() argument
844 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recv_finish()
866 cflags |= io_put_kbufs(req, this_ret, sel->buf_list, io_bundle_nbufs(kmsg, this_ret)); in io_recv_finish()
868 cflags = req->cqe.flags | (cflags & CQE_F_MASK); in io_recv_finish()
872 if (req->flags & REQ_F_BL_EMPTY) in io_recv_finish()
881 req->cqe.flags = cflags & ~CQE_F_MASK; in io_recv_finish()
888 cflags |= io_put_kbuf(req, sel->val, sel->buf_list); in io_recv_finish()
895 if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished && in io_recv_finish()
896 io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) { in io_recv_finish()
898 io_mshot_prep_retry(req, kmsg); in io_recv_finish()
916 io_req_set_res(req, sel->val, cflags); in io_recv_finish()
918 io_req_msg_cleanup(req, issue_flags); in io_recv_finish()
1006 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) in io_recvmsg() argument
1008 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recvmsg()
1009 struct io_async_msghdr *kmsg = req->async_data; in io_recvmsg()
1017 sock = sock_from_file(req->file); in io_recvmsg()
1021 if (!(req->flags & REQ_F_POLLED) && in io_recvmsg()
1031 if (io_do_buffer_select(req)) { in io_recvmsg()
1034 sel = io_buffer_select(req, &len, sr->buf_group, issue_flags); in io_recvmsg()
1038 if (req->flags & REQ_F_APOLL_MULTISHOT) { in io_recvmsg()
1041 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_recvmsg()
1051 if (req->flags & REQ_F_APOLL_MULTISHOT) { in io_recvmsg()
1065 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_recvmsg()
1070 return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret); in io_recvmsg()
1074 req_set_fail(req); in io_recvmsg()
1076 req_set_fail(req); in io_recvmsg()
1084 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_recvmsg()
1087 if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags)) in io_recvmsg()
1093 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg, in io_recv_buf_select() argument
1096 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recv_buf_select()
1127 ret = io_buffers_peek(req, &arg, sel); in io_recv_buf_select()
1134 req->flags |= REQ_F_NEED_CLEANUP; in io_recv_buf_select()
1150 *sel = io_buffer_select(req, &len, sr->buf_group, issue_flags); in io_recv_buf_select()
1165 int io_recv(struct io_kiocb *req, unsigned int issue_flags) in io_recv() argument
1167 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recv()
1168 struct io_async_msghdr *kmsg = req->async_data; in io_recv()
1176 if (!(req->flags & REQ_F_POLLED) && in io_recv()
1180 sock = sock_from_file(req->file); in io_recv()
1190 if (io_do_buffer_select(req)) { in io_recv()
1192 ret = io_recv_buf_select(req, kmsg, &sel, issue_flags); in io_recv()
1209 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_recv()
1216 return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret); in io_recv()
1220 req_set_fail(req); in io_recv()
1223 req_set_fail(req); in io_recv()
1232 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_recv()
1235 if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags)) in io_recv()
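
io_recv() and io_recv_finish() carry the multishot machinery: each received chunk posts a CQE with IORING_CQE_F_MORE, io_mshot_prep_retry() re-arms the request, and the cycle ends on error, EOF, or an exhausted buffer group (REQ_F_BL_EMPTY). A userspace sketch of multishot receive over a provided buffer ring, assuming liburing >= 2.4; BGID, NBUFS, BUF_SZ and recv_multishot() are illustrative choices:

#include <errno.h>
#include <liburing.h>
#include <stdlib.h>

#define BGID	0			/* buffer group id, arbitrary */
#define NBUFS	64			/* must be a power of two */
#define BUF_SZ	4096

static int recv_multishot(struct io_uring *ring, int sockfd)
{
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char *pool;
	int i, ret = 0;

	/* register NBUFS provided buffers under group BGID */
	br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &ret);
	if (!br)
		return ret;
	pool = malloc((size_t)NBUFS * BUF_SZ);
	if (!pool)
		return -ENOMEM;
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, pool + (size_t)i * BUF_SZ, BUF_SZ,
				      i, io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);

	/* one SQE, many CQEs: the kernel picks a buffer per completion */
	sqe = io_uring_get_sqe(ring);
	if (!sqe) {
		ret = -EAGAIN;
		goto out;
	}
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	for (;;) {
		unsigned int flags;

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			break;
		flags = cqe->flags;
		ret = cqe->res;
		if (ret > 0 && (flags & IORING_CQE_F_BUFFER)) {
			int bid = flags >> IORING_CQE_BUFFER_SHIFT;

			/* payload: ret bytes at pool + bid * BUF_SZ; hand
			 * the buffer back once it has been consumed */
			io_uring_buf_ring_add(br, pool + (size_t)bid * BUF_SZ,
					      BUF_SZ, bid,
					      io_uring_buf_ring_mask(NBUFS), 0);
			io_uring_buf_ring_advance(br, 1);
		}
		io_uring_cqe_seen(ring, cqe);
		if (ret <= 0 || !(flags & IORING_CQE_F_MORE))
			break;	/* EOF, error, or kernel stopped re-arming */
	}
out:
	io_uring_free_buf_ring(ring, br, NBUFS, BGID);
	free(pool);
	return ret;
}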
1241 int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvzc_prep() argument
1243 struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc); in io_recvzc_prep()
1250 zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx); in io_recvzc_prep()
1265 req->flags |= REQ_F_APOLL_MULTISHOT; in io_recvzc_prep()
1270 int io_recvzc(struct io_kiocb *req, unsigned int issue_flags) in io_recvzc() argument
1272 struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc); in io_recvzc()
1277 if (!(req->flags & REQ_F_POLLED) && in io_recvzc()
1281 sock = sock_from_file(req->file); in io_recvzc()
1286 ret = io_zcrx_recv(req, zc->ifq, sock, zc->msg_flags | MSG_DONTWAIT, in io_recvzc()
1289 io_req_set_res(req, 0, 0); in io_recvzc()
1299 req_set_fail(req); in io_recvzc()
1300 io_req_set_res(req, ret, 0); in io_recvzc()
1306 void io_send_zc_cleanup(struct io_kiocb *req) in io_send_zc_cleanup() argument
1308 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_zc_cleanup()
1309 struct io_async_msghdr *io = req->async_data; in io_send_zc_cleanup()
1311 if (req_has_async_data(req)) in io_send_zc_cleanup()
1323 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_send_zc_prep() argument
1325 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_zc_prep()
1326 struct io_ring_ctx *ctx = req->ctx; in io_send_zc_prep()
1336 if (req->flags & REQ_F_CQE_SKIP) in io_send_zc_prep()
1342 notif->cqe.user_data = req->cqe.user_data; in io_send_zc_prep()
1345 req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY; in io_send_zc_prep()
1362 req->buf_index = READ_ONCE(sqe->buf_index); in io_send_zc_prep()
1364 req->flags |= REQ_F_NOWAIT; in io_send_zc_prep()
1366 if (io_is_compat(req->ctx)) in io_send_zc_prep()
1369 iomsg = io_msg_alloc_async(req); in io_send_zc_prep()
1373 if (req->opcode == IORING_OP_SEND_ZC) { in io_send_zc_prep()
1374 ret = io_send_setup(req, sqe); in io_send_zc_prep()
1378 ret = io_sendmsg_setup(req, sqe); in io_send_zc_prep()
1441 static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags) in io_send_zc_import() argument
1443 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_zc_import()
1444 struct io_async_msghdr *kmsg = req->async_data; in io_send_zc_import()
1448 sr->notif->buf_index = req->buf_index; in io_send_zc_import()
1454 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) in io_send_zc() argument
1456 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_zc()
1457 struct io_async_msghdr *kmsg = req->async_data; in io_send_zc()
1462 sock = sock_from_file(req->file); in io_send_zc()
1468 if (!(req->flags & REQ_F_POLLED) && in io_send_zc()
1472 if (req->flags & REQ_F_IMPORT_BUFFER) { in io_send_zc()
1473 req->flags &= ~REQ_F_IMPORT_BUFFER; in io_send_zc()
1474 ret = io_send_zc_import(req, issue_flags); in io_send_zc()
1502 req_set_fail(req); in io_send_zc()
1517 io_req_msg_cleanup(req, 0); in io_send_zc()
1519 io_req_set_res(req, ret, IORING_CQE_F_MORE); in io_send_zc()
1523 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg_zc() argument
1525 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg_zc()
1526 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg_zc()
1531 if (req->flags & REQ_F_IMPORT_BUFFER) { in io_sendmsg_zc()
1535 ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter, req, in io_sendmsg_zc()
1539 req->flags &= ~REQ_F_IMPORT_BUFFER; in io_sendmsg_zc()
1542 sock = sock_from_file(req->file); in io_sendmsg_zc()
1548 if (!(req->flags & REQ_F_POLLED) && in io_sendmsg_zc()
1572 req_set_fail(req); in io_sendmsg_zc()
1587 io_req_msg_cleanup(req, 0); in io_sendmsg_zc()
1589 io_req_set_res(req, ret, IORING_CQE_F_MORE); in io_sendmsg_zc()
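
Both zero-copy send paths finish in two steps, visible above: the result CQE is posted with IORING_CQE_F_MORE, and the notification allocated in io_send_zc_prep() later posts a second CQE flagged IORING_CQE_F_NOTIF once the kernel drops its references to the user pages. A sketch of handling that two-CQE contract with liburing (assumes IORING_OP_SEND_ZC support, kernel >= 6.0; uring_send_zc() is an illustrative name):

#include <errno.h>
#include <liburing.h>
#include <sys/socket.h>

/* Sketch: IORING_OP_SEND_ZC. The buffer must stay untouched until the
 * IORING_CQE_F_NOTIF completion arrives, not just until the result CQE. */
static int uring_send_zc(struct io_uring *ring, int sockfd,
			 const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int sent = 0, seen_notif = 0, ret;

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_send_zc(sqe, sockfd, buf, len, MSG_NOSIGNAL, 0);
	io_uring_submit(ring);

	while (!seen_notif) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			seen_notif = 1;		/* pages released; buf reusable */
		} else {
			sent = cqe->res;	/* bytes sent or -errno */
			if (!(cqe->flags & IORING_CQE_F_MORE))
				seen_notif = 1;	/* no notification will follow */
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return sent;
}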
1593 void io_sendrecv_fail(struct io_kiocb *req) in io_sendrecv_fail() argument
1595 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendrecv_fail()
1598 req->cqe.res = sr->done_io; in io_sendrecv_fail()
1600 if ((req->flags & REQ_F_NEED_CLEANUP) && in io_sendrecv_fail()
1601 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC)) in io_sendrecv_fail()
1602 req->cqe.flags |= IORING_CQE_F_MORE; in io_sendrecv_fail()
1608 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
1610 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept); in io_accept_prep()
1636 req->flags |= REQ_F_APOLL_MULTISHOT; in io_accept_prep()
1638 req->flags |= REQ_F_NOWAIT; in io_accept_prep()
1642 int io_accept(struct io_kiocb *req, unsigned int issue_flags) in io_accept() argument
1644 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept); in io_accept()
1654 if (!(req->flags & REQ_F_POLLED) && in io_accept()
1666 file = do_accept(req->file, &arg, accept->addr, accept->addr_len, in io_accept()
1682 ret = io_fixed_fd_install(req, issue_flags, file, in io_accept()
1690 if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) && in io_accept()
1691 io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) { in io_accept()
1697 io_req_set_res(req, ret, cflags); in io_accept()
1699 req_set_fail(req); in io_accept()
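
io_accept() supports multishot: with REQ_F_APOLL_MULTISHOT one SQE keeps producing accepted sockets, each CQE carrying the new fd in res and IORING_CQE_F_MORE while the request stays armed. A liburing sketch; accept_loop() and handle_client() are illustrative placeholders:

#include <errno.h>
#include <liburing.h>

/* Sketch: multishot accept; cqe->res is the new fd for each connection. */
static int accept_loop(struct io_uring *ring, int listenfd,
		       void (*handle_client)(int fd))
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret, more;

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		ret = cqe->res;			/* new fd, or -errno */
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(ring, cqe);
		if (ret < 0)
			return ret;		/* multishot terminated */
		handle_client(ret);
		if (!more)
			break;	/* kernel stopped; resubmit to re-arm */
	}
	return 0;
}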
1703 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_socket_prep() argument
1705 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket); in io_socket_prep()
1724 int io_socket(struct io_kiocb *req, unsigned int issue_flags) in io_socket() argument
1726 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket); in io_socket()
1745 req_set_fail(req); in io_socket()
1750 ret = io_fixed_fd_install(req, issue_flags, file, in io_socket()
1753 io_req_set_res(req, ret, 0); in io_socket()
1757 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
1759 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect); in io_connect_prep()
1769 io = io_msg_alloc_async(req); in io_connect_prep()
1776 int io_connect(struct io_kiocb *req, unsigned int issue_flags) in io_connect() argument
1778 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect); in io_connect()
1779 struct io_async_msghdr *io = req->async_data; in io_connect()
1787 if (vfs_poll(req->file, &pt) & EPOLLERR) in io_connect()
1793 ret = __sys_connect_file(req->file, &io->addr, connect->addr_len, in io_connect()
1815 ret = sock_error(sock_from_file(req->file)->sk); in io_connect()
1822 req_set_fail(req); in io_connect()
1823 io_req_msg_cleanup(req, issue_flags); in io_connect()
1824 io_req_set_res(req, ret, 0); in io_connect()
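
io_connect() absorbs the classic non-blocking connect dance: the vfs_poll()/sock_error() lines above handle the in-progress case in-kernel, so userspace sees a single CQE with the final result. A sketch, with uring_connect() as an illustrative helper connecting to the loopback address:

#include <errno.h>
#include <liburing.h>
#include <netinet/in.h>
#include <string.h>

/* Sketch: async connect; sockfd is an existing, unconnected TCP socket. */
static int uring_connect(struct io_uring *ring, int sockfd,
			 unsigned short port)
{
	struct sockaddr_in addr;
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EAGAIN;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
			      sizeof(addr));
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* 0 or -errno, never EINPROGRESS */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}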
1828 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_bind_prep() argument
1830 struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind); in io_bind_prep()
1840 io = io_msg_alloc_async(req); in io_bind_prep()
1846 int io_bind(struct io_kiocb *req, unsigned int issue_flags) in io_bind() argument
1848 struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind); in io_bind()
1849 struct io_async_msghdr *io = req->async_data; in io_bind()
1853 sock = sock_from_file(req->file); in io_bind()
1859 req_set_fail(req); in io_bind()
1860 io_req_set_res(req, ret, 0); in io_bind()
1864 int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_listen_prep() argument
1866 struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen); in io_listen_prep()
1875 int io_listen(struct io_kiocb *req, unsigned int issue_flags) in io_listen() argument
1877 struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen); in io_listen()
1881 sock = sock_from_file(req->file); in io_listen()
1887 req_set_fail(req); in io_listen()
1888 io_req_set_res(req, ret, 0); in io_listen()
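
io_bind() and io_listen() complete the socket lifecycle, so a server can be stood up entirely from the ring. A hedged sketch chaining both on an existing socket; note that io_uring_prep_bind() and io_uring_prep_listen() are recent additions (liburing >= 2.8, kernel >= 6.11), so their availability is an assumption here:

#include <errno.h>
#include <liburing.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch: bind+listen submitted as a linked pair on an existing socket.
 * IOSQE_IO_LINK cancels the listen if the bind fails. */
static int uring_make_listener(struct io_uring *ring, int sockfd,
			       unsigned short port)
{
	struct sockaddr_in addr;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret = 0;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EAGAIN;
	io_uring_prep_bind(sqe, sockfd, (struct sockaddr *)&addr,
			   sizeof(addr));
	sqe->flags |= IOSQE_IO_LINK;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EAGAIN;
	io_uring_prep_listen(sqe, sockfd, SOMAXCONN);

	io_uring_submit(ring);
	for (i = 0; i < 2; i++) {
		int err = io_uring_wait_cqe(ring, &cqe);

		if (err < 0)
			return err;
		if (cqe->res < 0)
			ret = cqe->res;	/* bind -errno, or -ECANCELED listen */
		io_uring_cqe_seen(ring, cqe);
	}
	return ret;
}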