Lines Matching full:if
105 * Number of times we'll try to do receives if there's more data. If we
128 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
146 if (unlikely(!sock))
156 if (!(flags & MSG_WAITALL))
163 if (kmsg->vec.iovec)
171 /* can't recycle, ensure we free the iovec if we have one */
172 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
177 /* Let normal cleanup path reap it if we fail adding to the cache */
179 if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
182 if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
192 if (!hdr)
195 /* If the async data was cached, we might have an iov cached inside. */
196 if (hdr->vec.iovec)
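The matched lines at 171-196 above cover the netmsg header recycle path: when the ring is unlocked the iovec is freed outright, an oversized cached vector is trimmed before the put, and a failed cache put falls back to the normal cleanup path. A minimal userspace sketch of that bounded-cache pattern, assuming hypothetical names (hdr_cache, SOFT_CAP) rather than the kernel's io_alloc_cache API:

	#include <stdlib.h>

	#define SOFT_CAP   8	/* stand-in for IO_VEC_CACHE_SOFT_CAP */
	#define CACHE_MAX  64

	struct hdr {
		void *iovec;
		int nr;
	};

	static struct hdr *hdr_cache[CACHE_MAX];
	static int hdr_cache_len;

	/* Try to park a header in the cache; if this returns 0 the
	 * caller's normal cleanup path frees it, as the line-177
	 * comment describes. */
	static int hdr_cache_put(struct hdr *h)
	{
		if (h->nr > SOFT_CAP) {
			/* don't let one huge vector pin memory in the cache */
			free(h->iovec);
			h->iovec = NULL;
			h->nr = 0;
		}
		if (hdr_cache_len < CACHE_MAX) {
			hdr_cache[hdr_cache_len++] = h;
			return 1;
		}
		return 0;
	}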
219 if (iomsg->vec.iovec) {
229 if (unlikely(ret < 0))
232 if (iov) {
248 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
252 if (ret)
256 if (req->flags & REQ_F_BUFFER_SELECT) {
257 if (msg->msg_iovlen == 0) {
259 } else if (msg->msg_iovlen > 1) {
264 if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
275 if (!user_access_begin(umsg, sizeof(*umsg)))
301 if (io_is_compat(req->ctx)) {
305 if (ret)
317 if (unlikely(ret))
323 if (ret)
326 if (req->flags & REQ_F_BUFFER_SELECT) {
327 if (msg->msg_iovlen == 0) {
329 } else if (msg->msg_iovlen > 1) {
335 if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
360 if (READ_ONCE(sqe->__pad3[0]))
371 if (addr) {
373 if (unlikely(ret < 0))
378 if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
382 if (req->flags & REQ_F_BUFFER_SELECT)
385 if (sr->flags & IORING_SEND_VECTORIZED)
400 if (unlikely(ret))
405 if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
410 if (req->flags & REQ_F_BUFFER_SELECT)
424 if (sr->flags & ~SENDMSG_FLAGS)
427 if (sr->msg_flags & MSG_DONTWAIT)
429 if (req->flags & REQ_F_BUFFER_SELECT)
431 if (sr->flags & IORING_RECVSEND_BUNDLE) {
432 if (req->opcode == IORING_OP_SENDMSG)
438 if (io_is_compat(req->ctx))
441 if (unlikely(!io_msg_alloc_async(req)))
443 if (req->opcode != IORING_OP_SENDMSG)
445 if (unlikely(sqe->addr2 || sqe->file_index))
458 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
459 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
460 * the segments, then it's a trivial question to answer. If we have residual
470 if (ret <= 0)
472 if (iter_is_ubuf(&kmsg->msg.msg_iter))
476 if (!iov)
479 /* if all data was transferred, it's basic pointer math */
480 if (!iov_iter_count(&kmsg->msg.msg_iter))
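The comment at 458-460 explains how the bundle path counts transferred bytes: a single ITER_UBUF, or a fully consumed ITER_IOVEC, is trivial, and only residual data forces a walk over the segments. A self-contained sketch of that accounting over a plain iovec array (the kernel works on its iterator types instead):

	#include <stddef.h>
	#include <sys/uio.h>

	/* Bytes actually moved out of a multi-segment buffer, given how
	 * many bytes are still left in it. With no residual it's basic
	 * pointer math: the entire mapped length was transferred. */
	static size_t bundle_transferred(const struct iovec *iov, int nr_segs,
					 size_t residual)
	{
		size_t total = 0;
		int i;

		for (i = 0; i < nr_segs; i++)
			total += iov[i].iov_len;

		return total - residual;
	}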
499 if (req->flags & REQ_F_BUFFERS_COMMIT)
512 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
519 if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
523 * Fill CQE for this receive and see if we should keep trying to
526 if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
548 if (unlikely(!sock))
551 if (!(req->flags & REQ_F_POLLED) &&
556 if (issue_flags & IO_URING_F_NONBLOCK)
558 if (flags & MSG_WAITALL)
565 if (ret < min_ret) {
566 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
568 if (ret > 0 && io_net_retry(sock, flags)) {
574 if (ret == -ERESTARTSYS)
579 if (ret >= 0)
581 else if (sr->done_io)
599 if (kmsg->vec.iovec) {
605 if (!(sr->flags & IORING_RECVSEND_BUNDLE))
611 if (unlikely(ret < 0))
614 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
621 if (ret == 1) {
625 if (unlikely(ret))
646 if (unlikely(!sock))
649 if (!(req->flags & REQ_F_POLLED) &&
654 if (issue_flags & IO_URING_F_NONBLOCK)
659 if (io_do_buffer_select(req)) {
661 if (ret)
666 * If MSG_WAITALL is set, or this is a bundle send, then we need
667 * the full amount. If just bundle is set and we do a short send
670 if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
676 if (ret < min_ret) {
677 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
680 if (ret > 0 && io_net_retry(sock, flags)) {
686 if (ret == -ERESTARTSYS)
690 if (ret >= 0)
692 else if (sr->done_io)
696 if (!io_send_finish(req, kmsg, &sel))
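Lines 666-676 encode the short-send rule for io_send: with MSG_WAITALL, or for a bundle send, only the full length satisfies the request, so a shorter result is recorded as progress and retried (or ends the bundle sequence). A hedged restatement of that check, with illustrative parameters (want, is_bundle) that are not the kernel's names:

	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/socket.h>
	#include <sys/types.h>

	/* True when a send that returned 'ret' bytes completes the
	 * request: MSG_WAITALL and bundle sends demand the full
	 * 'want' length. */
	static bool send_satisfied(ssize_t ret, size_t want, int flags,
				   bool is_bundle)
	{
		size_t min_ret = 0;

		if ((flags & MSG_WAITALL) || is_bundle)
			min_ret = want;

		return ret >= 0 && (size_t)ret >= min_ret;
	}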
707 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
711 if (unlikely(namelen < 0))
713 if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
716 if (check_add_overflow(hdr, controllen, &hdr))
734 if (unlikely(ret))
737 if (!(req->flags & REQ_F_BUFFER_SELECT)) {
740 if (unlikely(ret))
753 if (unlikely(!kmsg))
756 if (req->opcode == IORING_OP_RECV) {
766 if (req->flags & REQ_F_BUFFER_SELECT)
784 if (unlikely(sqe->addr2))
790 if (sr->flags & ~RECVMSG_FLAGS)
793 if (sr->msg_flags & MSG_DONTWAIT)
795 if (sr->msg_flags & MSG_ERRQUEUE)
797 if (req->flags & REQ_F_BUFFER_SELECT)
800 if (sr->flags & IORING_RECV_MULTISHOT) {
801 if (!(req->flags & REQ_F_BUFFER_SELECT))
803 if (sr->msg_flags & MSG_WAITALL)
805 if (req->opcode == IORING_OP_RECV) {
808 if (sr->mshot_total_len)
810 } else if (sqe->optlen) {
814 } else if (sqe->optlen) {
818 if (sr->flags & IORING_RECVSEND_BUNDLE) {
819 if (req->opcode == IORING_OP_RECVMSG)
823 if (io_is_compat(req->ctx))
836 * Returns true if it is actually finished, or false if it should run
847 if (kmsg->msg.msg_inq > 0)
850 if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
852 * If sr->len hits zero, the limit has been reached. Mark
857 if (!sr->mshot_total_len) {
863 if (sr->flags & IORING_RECVSEND_BUNDLE) {
867 if (sr->flags & IORING_RECV_RETRY)
869 if (sr->mshot_len && sel->val >= sr->mshot_len)
872 if (req->flags & REQ_F_BL_EMPTY)
875 * If more is available AND it was a full transfer, retry and
878 if (!(sr->flags & IORING_RECV_NO_RETRY) &&
892 * Fill CQE for this receive and see if we should keep trying to
895 if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
900 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
901 if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
908 if (issue_flags & IO_URING_F_MULTISHOT)
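The comments around 836-908 give the termination contract for multishot receive: it rearms until the optional byte limit is exhausted, the buffer list runs empty, or the completion can't be posted. A rough model of that decision, with placeholder fields (limit, buffers_empty) rather than the kernel's io_sr_msg state:

	#include <stdbool.h>
	#include <stddef.h>

	struct mshot_state {
		size_t limit;         /* remaining byte budget, 0 = unlimited */
		bool   buffers_empty; /* analogue of REQ_F_BL_EMPTY */
	};

	/* Returns true if the multishot receive is actually finished,
	 * false if it should be rearmed and run again. */
	static bool mshot_finished(struct mshot_state *st, size_t transferred,
				   bool cqe_posted)
	{
		if (st->limit) {
			st->limit -= transferred < st->limit ? transferred
							     : st->limit;
			if (!st->limit)
				return true;  /* byte limit reached */
		}
		if (st->buffers_empty)
			return true;          /* nothing left to receive into */
		return !cqe_posted;           /* CQ overflow etc.: stop */
	}

Userspace observes the same contract through IORING_CQE_F_MORE: the flag is set on every CQE the request posts except the final one.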
931 if (*len < hdr)
934 if (kmsg->controllen) {
960 if (kmsg->namelen)
965 if (sock->file->f_flags & O_NONBLOCK)
970 if (err < 0)
979 if (err > kmsg->payloadlen)
983 if (kmsg->msg.msg_namelen > kmsg->namelen)
997 if (copy_to_user(io->buf, &hdr, copy_len)) {
1018 if (unlikely(!sock))
1021 if (!(req->flags & REQ_F_POLLED) &&
1026 if (force_nonblock)
1031 if (io_do_buffer_select(req)) {
1035 if (!sel.addr)
1038 if (req->flags & REQ_F_APOLL_MULTISHOT) {
1040 if (ret) {
1051 if (req->flags & REQ_F_APOLL_MULTISHOT) {
1056 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
1063 if (ret < min_ret) {
1064 if (ret == -EAGAIN && force_nonblock) {
1068 if (ret > 0 && io_net_retry(sock, flags)) {
1072 if (ret == -ERESTARTSYS)
1075 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1079 if (ret > 0)
1081 else if (sr->done_io)
1087 if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
1100 * If the ring isn't locked, then don't use the peek interface
1104 if (!(issue_flags & IO_URING_F_UNLOCKED) &&
1113 if (kmsg->vec.iovec) {
1119 if (sel->val)
1121 else if (kmsg->msg.msg_inq > 1)
1124 /* if mshot limited, ensure we don't go over */
1125 if (sr->flags & IORING_RECV_MSHOT_LIM)
1128 if (unlikely(ret < 0))
1131 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
1136 if (arg.partial_map)
1140 if (ret == 1) {
1151 if (!sel->addr)
1158 if (unlikely(ret))
1176 if (!(req->flags & REQ_F_POLLED) &&
1181 if (unlikely(!sock))
1185 if (force_nonblock)
1190 if (io_do_buffer_select(req)) {
1193 if (unlikely(ret < 0)) {
1203 if (flags & MSG_WAITALL)
1207 if (ret < min_ret) {
1208 if (ret == -EAGAIN && force_nonblock) {
1212 if (ret > 0 && io_net_retry(sock, flags)) {
1218 if (ret == -ERESTARTSYS)
1221 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1227 if (ret > 0)
1229 else if (sr->done_io)
1235 if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
1246 if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
1251 if (!zc->ifq)
1257 if (zc->msg_flags)
1259 if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT))
1262 if (!(zc->flags & IORING_RECV_MULTISHOT))
1277 if (!(req->flags & REQ_F_POLLED) &&
1282 if (unlikely(!sock))
1288 if (len && zc->len == 0) {
1293 if (unlikely(ret <= 0) && ret != -EAGAIN) {
1294 if (ret == -ERESTARTSYS)
1296 if (ret == IOU_REQUEUE)
1311 if (req_has_async_data(req))
1313 if (zc->notif) {
1333 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1336 if (req->flags & REQ_F_CQE_SKIP)
1340 if (!notif)
1348 if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1349 if (zc->flags & ~IO_ZC_FLAGS_VALID)
1351 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1363 if (zc->msg_flags & MSG_DONTWAIT)
1366 if (io_is_compat(req->ctx))
1370 if (unlikely(!iomsg))
1373 if (req->opcode == IORING_OP_SEND_ZC) {
1376 if (unlikely(sqe->addr2 || sqe->file_index))
1380 if (unlikely(ret))
1383 if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
1408 if (!frag)
1410 else if (unlikely(!skb_zcopy_managed(skb)))
1426 if (bi.bi_size)
1463 if (unlikely(!sock))
1465 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1468 if (!(req->flags & REQ_F_POLLED) &&
1472 if (req->flags & REQ_F_IMPORT_BUFFER) {
1475 if (unlikely(ret))
1480 if (issue_flags & IO_URING_F_NONBLOCK)
1482 if (msg_flags & MSG_WAITALL)
1490 if (unlikely(ret < min_ret)) {
1491 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1494 if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
1500 if (ret == -ERESTARTSYS)
1505 if (ret >= 0)
1507 else if (zc->done_io)
1511 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1514 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1531 if (req->flags & REQ_F_IMPORT_BUFFER) {
1537 if (unlikely(ret))
1543 if (unlikely(!sock))
1545 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1548 if (!(req->flags & REQ_F_POLLED) &&
1553 if (issue_flags & IO_URING_F_NONBLOCK)
1555 if (flags & MSG_WAITALL)
1562 if (unlikely(ret < min_ret)) {
1563 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1566 if (ret > 0 && io_net_retry(sock, flags)) {
1570 if (ret == -ERESTARTSYS)
1575 if (ret >= 0)
1577 else if (sr->done_io)
1581 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1584 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1597 if (sr->done_io)
1600 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1612 if (sqe->len || sqe->buf_index)
1620 if (accept->iou_flags & ~ACCEPT_FLAGS)
1624 if (accept->file_slot) {
1625 if (accept->flags & SOCK_CLOEXEC)
1627 if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
1631 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1633 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1635 if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
1637 if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
1654 if (!(req->flags & REQ_F_POLLED) &&
1659 if (!fixed) {
1661 if (unlikely(fd < 0))
1668 if (IS_ERR(file)) {
1669 if (!fixed)
1672 if (ret == -EAGAIN && force_nonblock &&
1676 if (ret == -ERESTARTSYS)
1678 } else if (!fixed) {
1687 if (!arg.is_empty)
1690 if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
1692 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
1698 if (ret < 0)
1707 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1717 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1719 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1731 if (!fixed) {
1733 if (unlikely(fd < 0))
1737 if (IS_ERR(file)) {
1738 if (!fixed)
1741 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1743 if (ret == -ERESTARTSYS)
1746 } else if (!fixed) {
1762 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1770 if (unlikely(!io))
1784 if (connect->in_progress) {
1787 if (vfs_poll(req->file, &pt) & EPOLLERR)
1795 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1797 if (ret == -EINPROGRESS) {
1799 } else if (ret == -ECONNABORTED) {
1800 if (connect->seen_econnaborted)
1806 if (connect->in_progress) {
1813 if (ret == -EBADFD || ret == -EISCONN) {
1818 if (ret == -ERESTARTSYS)
1821 if (ret < 0)
1834 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1841 if (unlikely(!io))
1854 if (unlikely(!sock))
1858 if (ret < 0)
1868 if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
1882 if (unlikely(!sock))
1886 if (ret < 0)