Lines matching refs: sr

203 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_mshot_prep_retry() local
206 sr->done_io = 0; in io_mshot_prep_retry()
207 sr->flags &= ~IORING_RECV_RETRY_CLEAR; in io_mshot_prep_retry()
208 sr->len = sr->mshot_len; in io_mshot_prep_retry()
243 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_compat_msg_copy_hdr() local
247 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg))) in io_compat_msg_copy_hdr()
257 sr->len = 0; in io_compat_msg_copy_hdr()
265 sr->len = tmp_iov.iov_len; in io_compat_msg_copy_hdr()
293 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_msg_copy_hdr() local
294 struct user_msghdr __user *umsg = sr->umsg; in io_msg_copy_hdr()
327 sr->len = 0; in io_msg_copy_hdr()
336 sr->len = tmp_iov.iov_len; in io_msg_copy_hdr()
351 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_setup() local
357 sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_send_setup()
377 if (sr->flags & IORING_RECVSEND_FIXED_BUF) { in io_send_setup()
384 if (sr->flags & IORING_SEND_VECTORIZED) in io_send_setup()
385 return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); in io_send_setup()
387 return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); in io_send_setup()
392 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg_setup() local
397 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_sendmsg_setup()
402 sr->msg_control = kmsg->msg.msg_control_user; in io_sendmsg_setup()
404 if (sr->flags & IORING_RECVSEND_FIXED_BUF) { in io_sendmsg_setup()
418 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg_prep() local
420 sr->done_io = 0; in io_sendmsg_prep()
421 sr->len = READ_ONCE(sqe->len); in io_sendmsg_prep()
422 sr->flags = READ_ONCE(sqe->ioprio); in io_sendmsg_prep()
423 if (sr->flags & ~SENDMSG_FLAGS) in io_sendmsg_prep()
425 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; in io_sendmsg_prep()
426 if (sr->msg_flags & MSG_DONTWAIT) in io_sendmsg_prep()
429 sr->buf_group = req->buf_index; in io_sendmsg_prep()
430 if (sr->flags & IORING_RECVSEND_BUNDLE) { in io_sendmsg_prep()
433 sr->msg_flags |= MSG_WAITALL; in io_sendmsg_prep()
438 sr->msg_flags |= MSG_CMSG_COMPAT; in io_sendmsg_prep()
507 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_finish() local
511 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) { in io_send_finish()
539 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg() local
551 (sr->flags & IORING_RECVSEND_POLL_FIRST)) in io_sendmsg()
554 flags = sr->msg_flags; in io_sendmsg()
560 kmsg->msg.msg_control_user = sr->msg_control; in io_sendmsg()
570 sr->done_io += ret; in io_sendmsg()
579 ret += sr->done_io; in io_sendmsg()
580 else if (sr->done_io) in io_sendmsg()
581 ret = sr->done_io; in io_sendmsg()
589 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_select_buffer() local
592 .max_len = min_not_zero(sr->len, INT_MAX), in io_send_select_buffer()
594 .buf_group = sr->buf_group, in io_send_select_buffer()
604 if (!(sr->flags & IORING_RECVSEND_BUNDLE)) in io_send_select_buffer()
618 sr->len = arg.out_len; in io_send_select_buffer()
621 sr->buf = arg.iovs[0].iov_base; in io_send_select_buffer()
622 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, in io_send_select_buffer()
636 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send() local
649 (sr->flags & IORING_RECVSEND_POLL_FIRST)) in io_send()
652 flags = sr->msg_flags; in io_send()
669 if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE) in io_send()
680 sr->len -= ret; in io_send()
681 sr->buf += ret; in io_send()
682 sr->done_io += ret; in io_send()
690 ret += sr->done_io; in io_send()
691 else if (sr->done_io) in io_send()
692 ret = sr->done_io; in io_send()
748 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recvmsg_prep_setup() local
767 return import_ubuf(ITER_DEST, sr->buf, sr->len, in io_recvmsg_prep_setup()
779 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recvmsg_prep() local
781 sr->done_io = 0; in io_recvmsg_prep()
786 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_recvmsg_prep()
787 sr->len = READ_ONCE(sqe->len); in io_recvmsg_prep()
788 sr->flags = READ_ONCE(sqe->ioprio); in io_recvmsg_prep()
789 if (sr->flags & ~RECVMSG_FLAGS) in io_recvmsg_prep()
791 sr->msg_flags = READ_ONCE(sqe->msg_flags); in io_recvmsg_prep()
792 if (sr->msg_flags & MSG_DONTWAIT) in io_recvmsg_prep()
794 if (sr->msg_flags & MSG_ERRQUEUE) in io_recvmsg_prep()
797 sr->buf_group = req->buf_index; in io_recvmsg_prep()
798 sr->mshot_total_len = sr->mshot_len = 0; in io_recvmsg_prep()
799 if (sr->flags & IORING_RECV_MULTISHOT) { in io_recvmsg_prep()
802 if (sr->msg_flags & MSG_WAITALL) in io_recvmsg_prep()
805 sr->mshot_len = sr->len; in io_recvmsg_prep()
806 sr->mshot_total_len = READ_ONCE(sqe->optlen); in io_recvmsg_prep()
807 if (sr->mshot_total_len) in io_recvmsg_prep()
808 sr->flags |= IORING_RECV_MSHOT_LIM; in io_recvmsg_prep()
817 if (sr->flags & IORING_RECVSEND_BUNDLE) { in io_recvmsg_prep()
823 sr->msg_flags |= MSG_CMSG_COMPAT; in io_recvmsg_prep()
825 sr->nr_multishot_loops = 0; in io_recvmsg_prep()
843 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recv_finish() local
849 if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) { in io_recv_finish()
855 sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len); in io_recv_finish()
856 if (!sr->mshot_total_len) { in io_recv_finish()
857 sr->flags |= IORING_RECV_MSHOT_DONE; in io_recv_finish()
862 if (sr->flags & IORING_RECVSEND_BUNDLE) { in io_recv_finish()
863 size_t this_ret = sel->val - sr->done_io; in io_recv_finish()
866 if (sr->flags & IORING_RECV_RETRY) in io_recv_finish()
868 if (sr->mshot_len && sel->val >= sr->mshot_len) in io_recv_finish()
869 sr->flags |= IORING_RECV_MSHOT_CAP; in io_recv_finish()
877 if (!(sr->flags & IORING_RECV_NO_RETRY) && in io_recv_finish()
881 sr->len = kmsg->msg.msg_inq; in io_recv_finish()
882 sr->done_io += this_ret; in io_recv_finish()
883 sr->flags |= IORING_RECV_RETRY; in io_recv_finish()
900 if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY && in io_recv_finish()
901 !(sr->flags & IORING_RECV_MSHOT_CAP)) { in io_recv_finish()
905 sr->nr_multishot_loops = 0; in io_recv_finish()
906 sr->flags &= ~IORING_RECV_MSHOT_CAP; in io_recv_finish()
922 struct io_sr_msg *sr, void __user **buf, in io_recvmsg_prep_multishot() argument
940 sr->buf = *buf; /* stash for later copy */ in io_recvmsg_prep_multishot()
1007 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recvmsg() local
1021 (sr->flags & IORING_RECVSEND_POLL_FIRST)) in io_recvmsg()
1024 flags = sr->msg_flags; in io_recvmsg()
1031 size_t len = sr->len; in io_recvmsg()
1033 sel = io_buffer_select(req, &len, sr->buf_group, issue_flags); in io_recvmsg()
1038 ret = io_recvmsg_prep_multishot(kmsg, sr, &sel.addr, &len); in io_recvmsg()
1051 ret = io_recvmsg_multishot(sock, sr, kmsg, flags, in io_recvmsg()
1058 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, in io_recvmsg()
1068 sr->done_io += ret; in io_recvmsg()
1079 ret += sr->done_io; in io_recvmsg()
1080 else if (sr->done_io) in io_recvmsg()
1081 ret = sr->done_io; in io_recvmsg()
1095 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recv_buf_select() local
1104 sr->flags & IORING_RECVSEND_BUNDLE) { in io_recv_buf_select()
1109 .buf_group = sr->buf_group, in io_recv_buf_select()
1124 if (sr->flags & IORING_RECV_MSHOT_LIM) in io_recv_buf_select()
1125 arg.max_len = min_not_zero(arg.max_len, sr->mshot_total_len); in io_recv_buf_select()
1136 sr->flags |= IORING_RECV_PARTIAL_MAP; in io_recv_buf_select()
1140 sr->buf = arg.iovs[0].iov_base; in io_recv_buf_select()
1141 sr->len = arg.iovs[0].iov_len; in io_recv_buf_select()
1149 *sel = io_buffer_select(req, &len, sr->buf_group, issue_flags); in io_recv_buf_select()
1152 sr->buf = sel->addr; in io_recv_buf_select()
1153 sr->len = len; in io_recv_buf_select()
1155 ret = import_ubuf(ITER_DEST, sr->buf, sr->len, in io_recv_buf_select()
1166 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_recv() local
1176 (sr->flags & IORING_RECVSEND_POLL_FIRST)) in io_recv()
1183 flags = sr->msg_flags; in io_recv()
1190 sel.val = sr->len; in io_recv()
1196 sr->buf = NULL; in io_recv()
1212 sr->len -= ret; in io_recv()
1213 sr->buf += ret; in io_recv()
1214 sr->done_io += ret; in io_recv()
1227 ret += sr->done_io; in io_recv()
1228 else if (sr->done_io) in io_recv()
1229 ret = sr->done_io; in io_recv()
1440 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_send_zc_import() local
1443 WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF)); in io_send_zc_import()
1445 sr->notif->buf_index = req->buf_index; in io_send_zc_import()
1446 return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter, in io_send_zc_import()
1447 (u64)(uintptr_t)sr->buf, sr->len, in io_send_zc_import()
1522 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendmsg_zc() local
1532 sr->notif->buf_index = req->buf_index; in io_sendmsg_zc()
1534 sr->notif, &kmsg->vec, uvec_segs, in io_sendmsg_zc()
1548 (sr->flags & IORING_RECVSEND_POLL_FIRST)) in io_sendmsg_zc()
1551 flags = sr->msg_flags; in io_sendmsg_zc()
1557 kmsg->msg.msg_control_user = sr->msg_control; in io_sendmsg_zc()
1558 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg; in io_sendmsg_zc()
1566 sr->done_io += ret; in io_sendmsg_zc()
1575 ret += sr->done_io; in io_sendmsg_zc()
1576 else if (sr->done_io) in io_sendmsg_zc()
1577 ret = sr->done_io; in io_sendmsg_zc()
1584 io_notif_flush(sr->notif); in io_sendmsg_zc()
1585 sr->notif = NULL; in io_sendmsg_zc()
1594 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); in io_sendrecv_fail() local
1596 if (sr->done_io) in io_sendrecv_fail()
1597 req->cqe.res = sr->done_io; in io_sendrecv_fail()
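
Nearly every hit above starts by resolving the request's per-opcode state with io_kiocb_to_cmd(req, struct io_sr_msg), and the issue paths then account partial transfers through sr->done_io / sr->len / sr->buf. The sketch below is a minimal, self-contained illustration of that access-and-accounting pattern only; the fake_* types and the fake_kiocb_to_cmd() helper are simplified stand-ins, not the kernel's actual io_kiocb / io_sr_msg definitions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative stand-ins: a request carries an opaque per-opcode command
 * area, and a typed accessor hands back a view of it. These do not match
 * the real io_uring struct layouts.
 */
struct fake_kiocb {
	uint64_t cmd[8];		/* opaque per-opcode storage */
	int buf_index;
};

struct fake_sr_msg {
	void *buf;			/* user buffer for send/recv */
	unsigned int len;		/* bytes still to transfer */
	unsigned int done_io;		/* bytes already transferred */
	uint16_t flags;
};

/* Analogue of io_kiocb_to_cmd(): reinterpret the command area as the
 * opcode-specific struct (the real macro also checks that it fits). */
#define fake_kiocb_to_cmd(req, type) ((type *)&(req)->cmd)

int main(void)
{
	struct fake_kiocb req;
	char payload[] = "hello";

	memset(&req, 0, sizeof(req));

	/* Prep step: stash the opcode-specific state in the request. */
	struct fake_sr_msg *sr = fake_kiocb_to_cmd(&req, struct fake_sr_msg);
	sr->buf = payload;
	sr->len = sizeof(payload) - 1;
	sr->done_io = 0;

	/* Issue step: a later function re-derives the same pointer and
	 * accounts for a partial transfer, mirroring the listed
	 * sr->done_io += ret; sr->len -= ret; sr->buf += ret; sequence. */
	struct fake_sr_msg *again = fake_kiocb_to_cmd(&req, struct fake_sr_msg);
	unsigned int ret = 3;		/* pretend only 3 bytes went out */
	again->done_io += ret;
	again->len -= ret;
	again->buf = (char *)again->buf + ret;

	printf("done_io=%u len=%u remaining=\"%s\"\n",
	       again->done_io, again->len, (char *)again->buf);
	return 0;
}

On a short write of 3 bytes this prints done_io=3 len=2 remaining="lo", i.e. the request remembers how far it got so a retry (or the final completion, which reports ret + sr->done_io) can pick up where it left off.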