Lines matching full:if in io_uring/net.c (each entry: kernel source line number, matching text, containing function)

104  * Number of times we'll try and do receives if there's more data. If we
127 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags || in io_shutdown_prep()
145 if (unlikely(!sock)) in io_shutdown()
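
The two io_shutdown() hits above are the whole prep/issue path for IORING_OP_SHUTDOWN: prep rejects any stray SQE fields (only the fd and the shutdown direction in sqe->len are used) and issue bails out if the file is not a socket. A minimal userspace sketch with liburing; the ring and the connected sockfd are assumptions, not part of the listing:

    #include <errno.h>
    #include <liburing.h>
    #include <sys/socket.h>

    /* Queue a SHUT_WR shutdown on an already-connected socket. */
    static int queue_shutdown(struct io_uring *ring, int sockfd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -ENOMEM;    /* submission queue is full */
        /* only the fd and 'how' are used; other SQE fields stay zero */
        io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
        return io_uring_submit(ring);
    }
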
155 if (!(flags & MSG_WAITALL)) in io_net_retry()
162 if (kmsg->vec.iovec) in io_netmsg_iovec_free()
170 /* can't recycle, ensure we free the iovec if we have one */ in io_netmsg_recycle()
171 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) { in io_netmsg_recycle()
176 /* Let normal cleanup path reap it if we fail adding to the cache */ in io_netmsg_recycle()
178 if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP) in io_netmsg_recycle()
181 if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) { in io_netmsg_recycle()
193 if (!hdr) in io_msg_alloc_async()
196 /* If the async data was cached, we might have an iov cached inside. */ in io_msg_alloc_async()
197 if (hdr->vec.iovec) in io_msg_alloc_async()
220 if (iomsg->vec.iovec) { in io_net_import_vec()
230 if (unlikely(ret < 0)) in io_net_import_vec()
233 if (iov) { in io_net_import_vec()
249 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg))) in io_compat_msg_copy_hdr()
253 if (ret) in io_compat_msg_copy_hdr()
257 if (req->flags & REQ_F_BUFFER_SELECT) { in io_compat_msg_copy_hdr()
258 if (msg->msg_iovlen == 0) { in io_compat_msg_copy_hdr()
260 } else if (msg->msg_iovlen > 1) { in io_compat_msg_copy_hdr()
265 if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov))) in io_compat_msg_copy_hdr()
276 if (!user_access_begin(umsg, sizeof(*umsg))) in io_copy_msghdr_from_user()
302 if (io_is_compat(req->ctx)) { in io_msg_copy_hdr()
306 if (ret) in io_msg_copy_hdr()
318 if (unlikely(ret)) in io_msg_copy_hdr()
324 if (ret) in io_msg_copy_hdr()
327 if (req->flags & REQ_F_BUFFER_SELECT) { in io_msg_copy_hdr()
328 if (msg->msg_iovlen == 0) { in io_msg_copy_hdr()
330 } else if (msg->msg_iovlen > 1) { in io_msg_copy_hdr()
336 if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov))) in io_msg_copy_hdr()
361 if (READ_ONCE(sqe->__pad3[0])) in io_send_setup()
372 if (addr) { in io_send_setup()
374 if (unlikely(ret < 0)) in io_send_setup()
379 if (sr->flags & IORING_RECVSEND_FIXED_BUF) { in io_send_setup()
383 if (req->flags & REQ_F_BUFFER_SELECT) in io_send_setup()
386 if (sr->flags & IORING_SEND_VECTORIZED) in io_send_setup()
401 if (unlikely(ret)) in io_sendmsg_setup()
406 if (sr->flags & IORING_RECVSEND_FIXED_BUF) { in io_sendmsg_setup()
411 if (req->flags & REQ_F_BUFFER_SELECT) in io_sendmsg_setup()
425 if (sr->flags & ~SENDMSG_FLAGS) in io_sendmsg_prep()
428 if (sr->msg_flags & MSG_DONTWAIT) in io_sendmsg_prep()
430 if (req->flags & REQ_F_BUFFER_SELECT) in io_sendmsg_prep()
432 if (sr->flags & IORING_RECVSEND_BUNDLE) { in io_sendmsg_prep()
433 if (req->opcode == IORING_OP_SENDMSG) in io_sendmsg_prep()
440 if (io_is_compat(req->ctx)) in io_sendmsg_prep()
443 if (unlikely(!io_msg_alloc_async(req))) in io_sendmsg_prep()
445 if (req->opcode != IORING_OP_SENDMSG) in io_sendmsg_prep()
447 if (unlikely(sqe->addr2 || sqe->file_index)) in io_sendmsg_prep()
460 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
461 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
462 the segments, then it's a trivial question to answer. If we have residual in io_bundle_nbufs()
472 if (ret <= 0) in io_bundle_nbufs()
474 if (iter_is_ubuf(&kmsg->msg.msg_iter)) in io_bundle_nbufs()
478 if (!iov) in io_bundle_nbufs()
481 /* if all data was transferred, it's basic pointer math */ in io_bundle_nbufs()
482 if (!iov_iter_count(&kmsg->msg.msg_iter)) in io_bundle_nbufs()
501 if (req->flags & REQ_F_BUFFERS_COMMIT) in io_net_kbuf_recyle()
514 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) { in io_send_finish()
521 if (bundle_finished || req->flags & REQ_F_BL_EMPTY) in io_send_finish()
525 * Fill CQE for this receive and see if we should keep trying to in io_send_finish()
528 if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) { in io_send_finish()
550 if (unlikely(!sock)) in io_sendmsg()
553 if (!(req->flags & REQ_F_POLLED) && in io_sendmsg()
558 if (issue_flags & IO_URING_F_NONBLOCK) in io_sendmsg()
560 if (flags & MSG_WAITALL) in io_sendmsg()
567 if (ret < min_ret) { in io_sendmsg()
568 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_sendmsg()
570 if (ret > 0 && io_net_retry(sock, flags)) { in io_sendmsg()
576 if (ret == -ERESTARTSYS) in io_sendmsg()
581 if (ret >= 0) in io_sendmsg()
583 else if (sr->done_io) in io_sendmsg()
602 if (kmsg->vec.iovec) { in io_send_select_buffer()
608 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) in io_send_select_buffer()
614 if (unlikely(ret < 0)) in io_send_select_buffer()
617 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) { in io_send_select_buffer()
624 if (ret == 1) { in io_send_select_buffer()
628 if (unlikely(ret)) in io_send_select_buffer()
648 if (unlikely(!sock)) in io_send()
651 if (!(req->flags & REQ_F_POLLED) && in io_send()
656 if (issue_flags & IO_URING_F_NONBLOCK) in io_send()
660 if (io_do_buffer_select(req)) { in io_send()
662 if (ret) in io_send()
667 * If MSG_WAITALL is set, or this is a bundle send, then we need in io_send()
668 * the full amount. If just bundle is set, if we do a short send in io_send()
671 if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE) in io_send()
677 if (ret < min_ret) { in io_send()
678 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_send()
681 if (ret > 0 && io_net_retry(sock, flags)) { in io_send()
687 if (ret == -ERESTARTSYS) in io_send()
691 if (ret >= 0) in io_send()
693 else if (sr->done_io) in io_send()
696 if (!io_send_finish(req, &ret, kmsg, issue_flags)) in io_send()
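
The io_send() hits above show how short sends are handled: the in-code comment notes that with MSG_WAITALL set, or for a bundle send, the full length is required, and io_net_retry() only re-arms the request on stream-like sockets when MSG_WAITALL was asked for; otherwise a partial result completes as-is. A hedged sketch of requesting full-length semantics from userspace, assuming an existing ring and a connected sockfd:

    #include <liburing.h>
    #include <sys/socket.h>

    /*
     * Ask for the whole buffer in one completion: MSG_WAITALL makes the
     * kernel retry short sends on a stream socket instead of completing
     * early with a partial count.
     */
    static void queue_send_all(struct io_uring *ring, int sockfd,
                               const void *buf, size_t len)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_send(sqe, sockfd, buf, len, MSG_WAITALL);
        io_uring_submit(ring);
    }
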
707 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) == in io_recvmsg_mshot_prep()
711 if (unlikely(namelen < 0)) in io_recvmsg_mshot_prep()
713 if (check_add_overflow(sizeof(struct io_uring_recvmsg_out), in io_recvmsg_mshot_prep()
716 if (check_add_overflow(hdr, controllen, &hdr)) in io_recvmsg_mshot_prep()
734 if (unlikely(ret)) in io_recvmsg_copy_hdr()
737 if (!(req->flags & REQ_F_BUFFER_SELECT)) { in io_recvmsg_copy_hdr()
740 if (unlikely(ret)) in io_recvmsg_copy_hdr()
753 if (unlikely(!kmsg)) in io_recvmsg_prep_setup()
756 if (req->opcode == IORING_OP_RECV) { in io_recvmsg_prep_setup()
766 if (req->flags & REQ_F_BUFFER_SELECT) in io_recvmsg_prep_setup()
784 if (unlikely(sqe->addr2)) in io_recvmsg_prep()
790 if (sr->flags & ~RECVMSG_FLAGS) in io_recvmsg_prep()
793 if (sr->msg_flags & MSG_DONTWAIT) in io_recvmsg_prep()
795 if (sr->msg_flags & MSG_ERRQUEUE) in io_recvmsg_prep()
797 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg_prep()
800 * as if we end up doing an io-wq based issue that selects a in io_recvmsg_prep()
810 if (sr->flags & IORING_RECV_MULTISHOT) { in io_recvmsg_prep()
811 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_recvmsg_prep()
813 if (sr->msg_flags & MSG_WAITALL) in io_recvmsg_prep()
815 if (req->opcode == IORING_OP_RECV) { in io_recvmsg_prep()
818 if (sr->mshot_total_len) in io_recvmsg_prep()
820 } else if (sqe->optlen) { in io_recvmsg_prep()
824 } else if (sqe->optlen) { in io_recvmsg_prep()
828 if (sr->flags & IORING_RECVSEND_BUNDLE) { in io_recvmsg_prep()
829 if (req->opcode == IORING_OP_RECVMSG) in io_recvmsg_prep()
833 if (io_is_compat(req->ctx)) in io_recvmsg_prep()
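
The io_recvmsg_prep() checks above spell out the multishot-receive contract: IORING_RECV_MULTISHOT is only accepted together with buffer selection and refuses MSG_WAITALL, and the optional total-length cap only applies to IORING_OP_RECV. A sketch of arming a multishot receive with liburing, assuming a provided-buffer ring was already registered under group id BGID (that setup is not shown):

    #include <liburing.h>

    #define BGID 0    /* assumed: buffer group registered beforehand */

    static void arm_multishot_recv(struct io_uring *ring, int sockfd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        /* buf/len are NULL/0: every completion picks a provided buffer */
        io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
        io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
        sqe->buf_group = BGID;
        io_uring_submit(ring);
    }
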
846 * Returns true if it is actually finished, or false if it should run
856 if (kmsg->msg.msg_inq > 0) in io_recv_finish()
859 if (*ret > 0 && sr->flags & IORING_RECV_MSHOT_LIM) { in io_recv_finish()
861 * If sr->len hits zero, the limit has been reached. Mark in io_recv_finish()
866 if (!sr->mshot_total_len) { in io_recv_finish()
872 if (sr->flags & IORING_RECVSEND_BUNDLE) { in io_recv_finish()
877 if (sr->flags & IORING_RECV_RETRY) in io_recv_finish()
879 if (sr->mshot_len && *ret >= sr->mshot_len) in io_recv_finish()
882 if (req->flags & REQ_F_BL_EMPTY) in io_recv_finish()
885 * If more is available AND it was a full transfer, retry and in io_recv_finish()
888 if (!(sr->flags & IORING_RECV_NO_RETRY) && in io_recv_finish()
902 * Fill CQE for this receive and see if we should keep trying to in io_recv_finish()
905 if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished && in io_recv_finish()
910 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) { in io_recv_finish()
911 if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY && in io_recv_finish()
918 if (issue_flags & IO_URING_F_MULTISHOT) in io_recv_finish()
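
io_recv_finish() above posts a CQE per completed receive and decides whether the multishot request stays armed; userspace sees that decision in IORING_CQE_F_MORE. A hedged sketch of the reaping side (buffer-id decoding shown, re-provisioning of buffers omitted):

    #include <liburing.h>

    static void drain_recv_cqes(struct io_uring *ring)
    {
        struct io_uring_cqe *cqe;

        while (io_uring_wait_cqe(ring, &cqe) == 0) {
            if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
                unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
                /* cqe->res bytes landed in provided buffer 'bid' */
                (void)bid;
            }
            if (!(cqe->flags & IORING_CQE_F_MORE)) {
                /* multishot ended (error, EOF, or buffers exhausted);
                 * submit a fresh recv_multishot SQE to re-arm */
            }
            io_uring_cqe_seen(ring, cqe);
        }
    }
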
941 if (*len < hdr) in io_recvmsg_prep_multishot()
944 if (kmsg->controllen) { in io_recvmsg_prep_multishot()
970 if (kmsg->namelen) in io_recvmsg_multishot()
975 if (sock->file->f_flags & O_NONBLOCK) in io_recvmsg_multishot()
980 if (err < 0) in io_recvmsg_multishot()
989 if (err > kmsg->payloadlen) in io_recvmsg_multishot()
993 if (kmsg->msg.msg_namelen > kmsg->namelen) in io_recvmsg_multishot()
1007 if (copy_to_user(io->buf, &hdr, copy_len)) { in io_recvmsg_multishot()
1027 if (unlikely(!sock)) in io_recvmsg()
1030 if (!(req->flags & REQ_F_POLLED) && in io_recvmsg()
1035 if (force_nonblock) in io_recvmsg()
1039 if (io_do_buffer_select(req)) { in io_recvmsg()
1044 if (!buf) in io_recvmsg()
1047 if (req->flags & REQ_F_APOLL_MULTISHOT) { in io_recvmsg()
1049 if (ret) { in io_recvmsg()
1060 if (req->flags & REQ_F_APOLL_MULTISHOT) { in io_recvmsg()
1065 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen) in io_recvmsg()
1072 if (ret < min_ret) { in io_recvmsg()
1073 if (ret == -EAGAIN && force_nonblock) { in io_recvmsg()
1074 if (issue_flags & IO_URING_F_MULTISHOT) in io_recvmsg()
1079 if (ret > 0 && io_net_retry(sock, flags)) { in io_recvmsg()
1083 if (ret == -ERESTARTSYS) in io_recvmsg()
1086 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recvmsg()
1090 if (ret > 0) in io_recvmsg()
1092 else if (sr->done_io) in io_recvmsg()
1097 if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags)) in io_recvmsg()
1110 * If the ring isn't locked, then don't use the peek interface in io_recv_buf_select()
1114 if (!(issue_flags & IO_URING_F_UNLOCKED) && in io_recv_buf_select()
1123 if (kmsg->vec.iovec) { in io_recv_buf_select()
1129 if (*len) in io_recv_buf_select()
1131 else if (kmsg->msg.msg_inq > 1) in io_recv_buf_select()
1134 /* if mshot limited, ensure we don't go over */ in io_recv_buf_select()
1135 if (sr->flags & IORING_RECV_MSHOT_LIM) in io_recv_buf_select()
1138 if (unlikely(ret < 0)) in io_recv_buf_select()
1141 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) { in io_recv_buf_select()
1146 if (arg.partial_map) in io_recv_buf_select()
1150 if (ret == 1) { in io_recv_buf_select()
1162 if (!buf) in io_recv_buf_select()
1169 if (unlikely(ret)) in io_recv_buf_select()
1187 if (!(req->flags & REQ_F_POLLED) && in io_recv()
1192 if (unlikely(!sock)) in io_recv()
1196 if (force_nonblock) in io_recv()
1200 if (io_do_buffer_select(req)) { in io_recv()
1202 if (unlikely(ret)) { in io_recv()
1212 if (flags & MSG_WAITALL) in io_recv()
1216 if (ret < min_ret) { in io_recv()
1217 if (ret == -EAGAIN && force_nonblock) { in io_recv()
1218 if (issue_flags & IO_URING_F_MULTISHOT) in io_recv()
1223 if (ret > 0 && io_net_retry(sock, flags)) { in io_recv()
1229 if (ret == -ERESTARTSYS) in io_recv()
1232 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recv()
1238 if (ret > 0) in io_recv()
1240 else if (sr->done_io) in io_recv()
1245 if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags)) in io_recv()
1256 if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3)) in io_recvzc_prep()
1261 if (!zc->ifq) in io_recvzc_prep()
1267 if (zc->msg_flags) in io_recvzc_prep()
1269 if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)) in io_recvzc_prep()
1272 if (!(zc->flags & IORING_RECV_MULTISHOT)) in io_recvzc_prep()
1287 if (!(req->flags & REQ_F_POLLED) && in io_recvzc()
1292 if (unlikely(!sock)) in io_recvzc()
1298 if (len && zc->len == 0) { in io_recvzc()
1303 if (unlikely(ret <= 0) && ret != -EAGAIN) { in io_recvzc()
1304 if (ret == -ERESTARTSYS) in io_recvzc()
1306 if (ret == IOU_REQUEUE) in io_recvzc()
1321 if (req_has_async_data(req)) in io_send_zc_cleanup()
1323 if (zc->notif) { in io_send_zc_cleanup()
1343 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))) in io_send_zc_prep()
1346 if (req->flags & REQ_F_CQE_SKIP) in io_send_zc_prep()
1350 if (!notif) in io_send_zc_prep()
1358 if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) { in io_send_zc_prep()
1359 if (zc->flags & ~IO_ZC_FLAGS_VALID) in io_send_zc_prep()
1361 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) { in io_send_zc_prep()
1373 if (zc->msg_flags & MSG_DONTWAIT) in io_send_zc_prep()
1376 if (io_is_compat(req->ctx)) in io_send_zc_prep()
1380 if (unlikely(!iomsg)) in io_send_zc_prep()
1383 if (req->opcode == IORING_OP_SEND_ZC) { in io_send_zc_prep()
1386 if (unlikely(sqe->addr2 || sqe->file_index)) in io_send_zc_prep()
1390 if (unlikely(ret)) in io_send_zc_prep()
1393 if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) { in io_send_zc_prep()
1418 if (!frag) in io_sg_from_iter()
1420 else if (unlikely(!skb_zcopy_managed(skb))) in io_sg_from_iter()
1436 if (bi.bi_size) in io_sg_from_iter()
1473 if (unlikely(!sock)) in io_send_zc()
1475 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) in io_send_zc()
1478 if (!(req->flags & REQ_F_POLLED) && in io_send_zc()
1482 if (req->flags & REQ_F_IMPORT_BUFFER) { in io_send_zc()
1485 if (unlikely(ret)) in io_send_zc()
1490 if (issue_flags & IO_URING_F_NONBLOCK) in io_send_zc()
1492 if (msg_flags & MSG_WAITALL) in io_send_zc()
1500 if (unlikely(ret < min_ret)) { in io_send_zc()
1501 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_send_zc()
1504 if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) { in io_send_zc()
1510 if (ret == -ERESTARTSYS) in io_send_zc()
1515 if (ret >= 0) in io_send_zc()
1517 else if (zc->done_io) in io_send_zc()
1521 * If we're in io-wq we can't rely on tw ordering guarantees, defer in io_send_zc()
1524 if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_send_zc()
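
io_send_zc() above is the zero-copy send path: prep refuses CQE-skip requests, issue requires a socket that advertises SOCK_SUPPORT_ZC, and the completion model is split in two. A minimal sketch, assuming the buffer stays untouched until the notification arrives:

    #include <liburing.h>

    static void queue_send_zc(struct io_uring *ring, int sockfd,
                              const void *buf, size_t len)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
        io_uring_submit(ring);
        /*
         * Expect two CQEs: the first carries the byte count and has
         * IORING_CQE_F_MORE set, the later notification has
         * IORING_CQE_F_NOTIF set and means 'buf' may be reused or freed.
         */
    }
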
1541 if (req->flags & REQ_F_IMPORT_BUFFER) { in io_sendmsg_zc()
1547 if (unlikely(ret)) in io_sendmsg_zc()
1553 if (unlikely(!sock)) in io_sendmsg_zc()
1555 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) in io_sendmsg_zc()
1558 if (!(req->flags & REQ_F_POLLED) && in io_sendmsg_zc()
1563 if (issue_flags & IO_URING_F_NONBLOCK) in io_sendmsg_zc()
1565 if (flags & MSG_WAITALL) in io_sendmsg_zc()
1572 if (unlikely(ret < min_ret)) { in io_sendmsg_zc()
1573 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_sendmsg_zc()
1576 if (ret > 0 && io_net_retry(sock, flags)) { in io_sendmsg_zc()
1580 if (ret == -ERESTARTSYS) in io_sendmsg_zc()
1585 if (ret >= 0) in io_sendmsg_zc()
1587 else if (sr->done_io) in io_sendmsg_zc()
1591 * If we're in io-wq we can't rely on tw ordering guarantees, defer in io_sendmsg_zc()
1594 if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_sendmsg_zc()
1607 if (sr->done_io) in io_sendrecv_fail()
1610 if ((req->flags & REQ_F_NEED_CLEANUP) && in io_sendrecv_fail()
1622 if (sqe->len || sqe->buf_index) in io_accept_prep()
1630 if (accept->iou_flags & ~ACCEPT_FLAGS) in io_accept_prep()
1634 if (accept->file_slot) { in io_accept_prep()
1635 if (accept->flags & SOCK_CLOEXEC) in io_accept_prep()
1637 if (accept->iou_flags & IORING_ACCEPT_MULTISHOT && in io_accept_prep()
1641 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) in io_accept_prep()
1643 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK)) in io_accept_prep()
1645 if (accept->iou_flags & IORING_ACCEPT_MULTISHOT) in io_accept_prep()
1647 if (accept->iou_flags & IORING_ACCEPT_DONTWAIT) in io_accept_prep()
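
io_accept_prep() above validates the accept flags, including the multishot mode in which one SQE keeps producing a CQE per incoming connection. A sketch using liburing's helper, assuming listen_fd is already bound and listening:

    #include <liburing.h>

    static void arm_multishot_accept(struct io_uring *ring, int listen_fd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        /* addr/addrlen NULL: peer addresses are not collected */
        io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
        io_uring_submit(ring);
        /*
         * Each accepted connection posts a CQE whose res is the new fd,
         * with IORING_CQE_F_MORE set for as long as the request is armed.
         */
    }
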
1664 if (!(req->flags & REQ_F_POLLED) && in io_accept()
1669 if (!fixed) { in io_accept()
1671 if (unlikely(fd < 0)) in io_accept()
1678 if (IS_ERR(file)) { in io_accept()
1679 if (!fixed) in io_accept()
1682 if (ret == -EAGAIN && force_nonblock && in io_accept()
1686 if (ret == -ERESTARTSYS) in io_accept()
1688 } else if (!fixed) { in io_accept()
1697 if (!arg.is_empty) in io_accept()
1700 if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) && in io_accept()
1702 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1) in io_accept()
1708 if (ret < 0) in io_accept()
1717 if (sqe->addr || sqe->rw_flags || sqe->buf_index) in io_socket_prep()
1727 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC)) in io_socket_prep()
1729 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) in io_socket_prep()
1741 if (!fixed) { in io_socket()
1743 if (unlikely(fd < 0)) in io_socket()
1747 if (IS_ERR(file)) { in io_socket()
1748 if (!fixed) in io_socket()
1751 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) in io_socket()
1753 if (ret == -ERESTARTSYS) in io_socket()
1756 } else if (!fixed) { in io_socket()
1772 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) in io_connect_prep()
1780 if (unlikely(!io)) in io_connect_prep()
1794 if (connect->in_progress) { in io_connect()
1797 if (vfs_poll(req->file, &pt) & EPOLLERR) in io_connect()
1805 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED) in io_connect()
1807 if (ret == -EINPROGRESS) { in io_connect()
1809 } else if (ret == -ECONNABORTED) { in io_connect()
1810 if (connect->seen_econnaborted) in io_connect()
1816 if (connect->in_progress) { in io_connect()
1823 if (ret == -EBADFD || ret == -EISCONN) { in io_connect()
1828 if (ret == -ERESTARTSYS) in io_connect()
1831 if (ret < 0) in io_connect()
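
The io_connect() branches above fold the usual non-blocking connect dance (EINPROGRESS, polling for writability, retrying ECONNABORTED) into a single request, so userspace only submits and waits for the final result. A minimal sketch; the loopback address and port are assumptions for illustration:

    #include <arpa/inet.h>
    #include <liburing.h>
    #include <string.h>

    static int connect_via_ring(struct io_uring *ring, int sockfd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        struct sockaddr_in addr;
        int ret;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(8080);                  /* assumed port */
        inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

        io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
                              sizeof(addr));
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        ret = cqe->res;               /* 0 on success, -errno on failure */
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }
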
1844 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) in io_bind_prep()
1851 if (unlikely(!io)) in io_bind_prep()
1864 if (unlikely(!sock)) in io_bind()
1868 if (ret < 0) in io_bind()
1878 if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2) in io_listen_prep()
1892 if (unlikely(!sock)) in io_listen()
1896 if (ret < 0) in io_listen()
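
io_bind() and io_listen() close out the listing; both just reject unused SQE fields, look up the socket, and call the socket-layer helpers. A sketch of setting up a listening socket entirely through the ring; this assumes a liburing recent enough to ship io_uring_prep_bind() and io_uring_prep_listen() alongside these opcodes:

    #include <liburing.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    static void queue_bind_listen(struct io_uring *ring, int sockfd)
    {
        struct sockaddr_in addr;
        struct io_uring_sqe *sqe;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);                  /* assumed port */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_bind(sqe, sockfd, (struct sockaddr *)&addr,
                           sizeof(addr));
        io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);   /* listen runs after bind */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_listen(sqe, sockfd, SOMAXCONN);

        io_uring_submit(ring);
    }
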