Lines Matching full:sr (search hits for "sr", listed with their source line numbers)
204 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
207 sr->done_io = 0;
208 sr->flags &= ~IORING_RECV_RETRY_CLEAR;
209 sr->len = sr->mshot_len;
244 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
248 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
258 sr->len = 0;
266 sr->len = tmp_iov.iov_len;
294 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
295 struct user_msghdr __user *umsg = sr->umsg;
328 sr->len = 0;
337 sr->len = tmp_iov.iov_len;
352 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
358 sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
378 if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
385 if (sr->flags & IORING_SEND_VECTORIZED)
386 return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
388 return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
393 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
398 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
403 sr->msg_control = kmsg->msg.msg_control_user;
405 if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
419 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
421 sr->done_io = 0;
422 sr->len = READ_ONCE(sqe->len);
423 sr->flags = READ_ONCE(sqe->ioprio);
424 if (sr->flags & ~SENDMSG_FLAGS)
426 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
427 if (sr->msg_flags & MSG_DONTWAIT)
430 sr->buf_group = req->buf_index;
431 if (sr->flags & IORING_RECVSEND_BUNDLE) {
434 sr->msg_flags |= MSG_WAITALL;
439 sr->msg_flags |= MSG_CMSG_COMPAT;
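
The prep fragments just above are evidently the io_uring network send paths (io_uring/net.c): the opcode flags are read from sqe->ioprio into sr->flags, MSG_NOSIGNAL is always added, and a bundle send takes its provided-buffer group from the SQE. For orientation, a minimal userspace sketch of how those SQE fields get populated with liburing follows; the liburing version requirement and the NULL/0 addr-and-length convention for bundle sends are assumptions, not something the listing itself confirms.

/*
 * Illustrative userspace sketch only, not part of the listing: queue a
 * provided-buffer ("bundle") send. Assumes liburing >= 2.6, a kernel
 * exposing IORING_RECVSEND_BUNDLE, and a buffer ring already registered
 * for group 0 that holds the data to transmit.
 */
#include <liburing.h>
#include <sys/socket.h>

static void prep_bundle_send(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* addr NULL / len 0: take data and lengths from the selected buffers */
	io_uring_prep_send(sqe, sockfd, NULL, 0, MSG_NOSIGNAL);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;			/* what sr->buf_group ends up holding */
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;	/* what sr->flags is read from */
}
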
508 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
512 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
540 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
552 (sr->flags & IORING_RECVSEND_POLL_FIRST))
555 flags = sr->msg_flags;
561 kmsg->msg.msg_control_user = sr->msg_control;
571 sr->done_io += ret;
580 ret += sr->done_io;
581 else if (sr->done_io)
582 ret = sr->done_io;
590 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
593 .max_len = min_not_zero(sr->len, INT_MAX),
595 .buf_group = sr->buf_group,
605 if (!(sr->flags & IORING_RECVSEND_BUNDLE))
619 sr->len = arg.out_len;
622 sr->buf = arg.iovs[0].iov_base;
623 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
637 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
650 (sr->flags & IORING_RECVSEND_POLL_FIRST))
653 flags = sr->msg_flags;
670 if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
681 sr->len -= ret;
682 sr->buf += ret;
683 sr->done_io += ret;
691 ret += sr->done_io;
692 else if (sr->done_io)
693 ret = sr->done_io;
749 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
768 return import_ubuf(ITER_DEST, sr->buf, sr->len,
780 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
782 sr->done_io = 0;
787 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
788 sr->len = READ_ONCE(sqe->len);
789 sr->flags = READ_ONCE(sqe->ioprio);
790 if (sr->flags & ~RECVMSG_FLAGS)
792 sr->msg_flags = READ_ONCE(sqe->msg_flags);
793 if (sr->msg_flags & MSG_DONTWAIT)
795 if (sr->msg_flags & MSG_ERRQUEUE)
798 sr->buf_group = req->buf_index;
799 sr->mshot_total_len = sr->mshot_len = 0;
800 if (sr->flags & IORING_RECV_MULTISHOT) {
803 if (sr->msg_flags & MSG_WAITALL)
806 sr->mshot_len = sr->len;
807 sr->mshot_total_len = READ_ONCE(sqe->optlen);
808 if (sr->mshot_total_len)
809 sr->flags |= IORING_RECV_MSHOT_LIM;
818 if (sr->flags & IORING_RECVSEND_BUNDLE) {
824 sr->msg_flags |= MSG_CMSG_COMPAT;
826 sr->nr_multishot_loops = 0;
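
The recv prep fragments above show the same pattern on the receive side: flags from sqe->ioprio, the buffer group from the SQE, and the multishot setup, including (on apparently newer kernels) a total-length cap read from sqe->optlen. A hedged userspace sketch of arming such a multishot receive follows; the liburing helper and version are assumptions, and the optlen-based cap is omitted because its userspace plumbing is version-dependent.

/*
 * Illustrative userspace sketch only: arm a multishot receive that draws
 * its buffers from provided-buffer group 1. Assumes liburing >= 2.3 with
 * io_uring_prep_recv_multishot() and a buffer ring already registered
 * for group 1.
 */
#include <liburing.h>

static void arm_multishot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* addr NULL / len 0: each completion picks a provided buffer */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 1;		/* ends up in sr->buf_group */
}
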
844 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
850 if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
852 * If sr->len hits zero, the limit has been reached. Mark
856 sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len);
857 if (!sr->mshot_total_len) {
858 sr->flags |= IORING_RECV_MSHOT_DONE;
863 if (sr->flags & IORING_RECVSEND_BUNDLE) {
864 size_t this_ret = sel->val - sr->done_io;
867 if (sr->flags & IORING_RECV_RETRY)
869 if (sr->mshot_len && sel->val >= sr->mshot_len)
870 sr->flags |= IORING_RECV_MSHOT_CAP;
878 if (!(sr->flags & IORING_RECV_NO_RETRY) &&
882 sr->len = kmsg->msg.msg_inq;
883 sr->done_io += this_ret;
884 sr->flags |= IORING_RECV_RETRY;
901 if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
902 !(sr->flags & IORING_RECV_MSHOT_CAP)) {
906 sr->nr_multishot_loops = 0;
907 sr->flags &= ~IORING_RECV_MSHOT_CAP;
923 struct io_sr_msg *sr, void __user **buf,
941 sr->buf = *buf; /* stash for later copy */
1008 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1022 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1025 flags = sr->msg_flags;
1032 size_t len = sr->len;
1034 sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
1039 ret = io_recvmsg_prep_multishot(kmsg, sr, &sel.addr, &len);
1052 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
1059 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
1069 sr->done_io += ret;
1080 ret += sr->done_io;
1081 else if (sr->done_io)
1082 ret = sr->done_io;
1096 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1105 sr->flags & IORING_RECVSEND_BUNDLE) {
1110 .buf_group = sr->buf_group,
1125 if (sr->flags & IORING_RECV_MSHOT_LIM)
1126 arg.max_len = min_not_zero(arg.max_len, sr->mshot_total_len);
1137 sr->flags |= IORING_RECV_PARTIAL_MAP;
1141 sr->buf = arg.iovs[0].iov_base;
1142 sr->len = arg.iovs[0].iov_len;
1150 *sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
1153 sr->buf = sel->addr;
1154 sr->len = len;
1156 ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
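
The buffer-selection fragments above pull iovecs out of a provided-buffer group (sr->buf_group) before the receive is issued. The userspace counterpart is registering a buffer ring for that group; a minimal liburing sketch, assuming liburing >= 2.4 for io_uring_setup_buf_ring() and with error handling trimmed:

#include <liburing.h>
#include <stdlib.h>

#define RECV_BGID	1
#define NR_BUFS		64	/* must be a power of two */
#define BUF_SIZE	4096

/* Register NR_BUFS buffers of BUF_SIZE bytes under group RECV_BGID. */
static struct io_uring_buf_ring *setup_recv_buffers(struct io_uring *ring,
						    unsigned char **backing)
{
	struct io_uring_buf_ring *br;
	int i, err;

	*backing = malloc(NR_BUFS * BUF_SIZE);
	if (!*backing)
		return NULL;

	br = io_uring_setup_buf_ring(ring, NR_BUFS, RECV_BGID, 0, &err);
	if (!br)
		return NULL;

	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, *backing + i * BUF_SIZE, BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);
	return br;
}
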
1167 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1177 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1184 flags = sr->msg_flags;
1191 sel.val = sr->len;
1197 sr->buf = NULL;
1213 sr->len -= ret;
1214 sr->buf += ret;
1215 sr->done_io += ret;
1228 ret += sr->done_io;
1229 else if (sr->done_io)
1230 ret = sr->done_io;
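
Inside the kernel, the receive path above accounts partial progress in sr->done_io and folds it into the final result. On the user side each multishot or bundle completion is just a CQE: res carries the byte count (or -errno), the buffer id is packed into cqe->flags, and a cleared IORING_CQE_F_MORE means the request has terminated and must be re-armed. A small hedged sketch of draining those CQEs:

#include <liburing.h>

/*
 * Illustrative only: drain completions from a multishot/bundle recv.
 * Replenishing consumed buffers back onto the buffer ring is left out
 * for brevity.
 */
static void reap_recv_cqes(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (!io_uring_peek_cqe(ring, &cqe)) {
		if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
			unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

			/* cqe->res bytes landed in provided buffer 'bid' */
			(void)bid;
		}
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* multishot stopped (error, EOF, cap, or retry limit); re-arm if needed */
		}
		io_uring_cqe_seen(ring, cqe);
	}
}
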
1443 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1446 WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
1448 sr->notif->buf_index = req->buf_index;
1449 return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
1450 (u64)(uintptr_t)sr->buf, sr->len,
1525 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1549 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1552 flags = sr->msg_flags;
1558 kmsg->msg.msg_control_user = sr->msg_control;
1559 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1567 sr->done_io += ret;
1576 ret += sr->done_io;
1577 else if (sr->done_io)
1578 ret = sr->done_io;
1585 io_notif_flush(sr->notif);
1586 sr->notif = NULL;
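
The zero-copy send fragments above tie the data pages to a notification (sr->notif) and, for IORING_RECVSEND_FIXED_BUF, import a registered buffer. A hedged userspace sketch with liburing, assuming liburing >= 2.3 and that buffer index 0 was registered beforehand with io_uring_register_buffers(); the request completes with two CQEs, the send result (flagged IORING_CQE_F_MORE) and a later notification flagged IORING_CQE_F_NOTIF once the network stack drops its references to the pages:

#include <liburing.h>
#include <sys/socket.h>

/* Illustrative only: zero-copy send out of registered buffer index 0. */
static void prep_zc_send(struct io_uring *ring, int sockfd,
			 const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_send_zc_fixed(sqe, sockfd, buf, len, MSG_NOSIGNAL,
				    /*zc_flags=*/0, /*buf_index=*/0);
}
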
1595 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1597 if (sr->done_io)
1598 req->cqe.res = sr->done_io;