Lines Matching full:msg (illustrative usage sketches, based on these matches, follow the listing)

13 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
15 if (msg->sg.end > msg->sg.start &&
16 elem_first_coalesce < msg->sg.end)
19 if (msg->sg.end < msg->sg.start &&
20 (elem_first_coalesce > msg->sg.start ||
21 elem_first_coalesce < msg->sg.end))
27 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
31 u32 osize = msg->sg.size;
34 len -= msg->sg.size;
52 i = msg->sg.end;
54 sge = &msg->sg.data[i];
56 if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
61 if (sk_msg_full(msg)) {
66 sge = &msg->sg.data[msg->sg.end];
70 sk_msg_iter_next(msg, end);
74 msg->sg.size += use;
82 sk_msg_trim(sk, msg, osize);
138 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
140 int i = msg->sg.start;
143 struct scatterlist *sge = sk_msg_elem(msg, i);
157 } while (bytes && i != msg->sg.end);
158 msg->sg.start = i;
162 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
164 int i = msg->sg.start;
167 struct scatterlist *sge = &msg->sg.data[i];
173 } while (i != msg->sg.end);
177 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
180 struct scatterlist *sge = sk_msg_elem(msg, i);
184 if (!msg->skb) {
193 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
196 struct scatterlist *sge = sk_msg_elem(msg, i);
199 while (msg->sg.size) {
200 msg->sg.size -= sge->length;
201 freed += sk_msg_free_elem(sk, msg, i, charge);
203 sk_msg_check_to_free(msg, i, msg->sg.size);
204 sge = sk_msg_elem(msg, i);
206 consume_skb(msg->skb);
207 sk_msg_init(msg);
211 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
213 return __sk_msg_free(sk, msg, msg->sg.start, false);
217 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
219 return __sk_msg_free(sk, msg, msg->sg.start, true);
223 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
227 u32 i = msg->sg.start;
230 sge = sk_msg_elem(msg, i);
238 msg->sg.size -= bytes;
242 msg->sg.size -= sge->length;
244 sk_msg_free_elem(sk, msg, i, charge);
246 sk_msg_check_to_free(msg, i, bytes);
248 msg->sg.start = i;
251 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
253 __sk_msg_free_partial(sk, msg, bytes, true);
257 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
260 __sk_msg_free_partial(sk, msg, bytes, false);
263 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
265 int trim = msg->sg.size - len;
266 u32 i = msg->sg.end;
274 msg->sg.size = len;
275 while (msg->sg.data[i].length &&
276 trim >= msg->sg.data[i].length) {
277 trim -= msg->sg.data[i].length;
278 sk_msg_free_elem(sk, msg, i, true);
284 msg->sg.data[i].length -= trim;
287 if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
288 msg->sg.copybreak = msg->sg.data[i].length;
291 msg->sg.end = i;
299 if (!msg->sg.size) {
300 msg->sg.curr = msg->sg.start;
301 msg->sg.copybreak = 0;
302 } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
303 sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
305 msg->sg.curr = i;
306 msg->sg.copybreak = msg->sg.data[i].length;
312 struct sk_msg *msg, u32 bytes)
314 int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
319 orig = msg->sg.size;
336 msg->sg.size += copied;
340 sg_set_page(&msg->sg.data[msg->sg.end],
342 sg_unmark_end(&msg->sg.data[msg->sg.end]);
347 sk_msg_iter_next(msg, end);
355 msg->sg.copybreak = 0;
356 msg->sg.curr = msg->sg.end;
359 /* Revert iov_iter updates, msg will need to use 'trim' later if it
363 iov_iter_revert(from, msg->sg.size - orig);
369 struct sk_msg *msg, u32 bytes)
371 int ret = -ENOSPC, i = msg->sg.curr;
377 sge = sk_msg_elem(msg, i);
379 if (msg->sg.copybreak >= sge->length) {
380 msg->sg.copybreak = 0;
382 if (i == msg->sg.end)
384 sge = sk_msg_elem(msg, i);
387 buf_size = sge->length - msg->sg.copybreak;
389 to = sg_virt(sge) + msg->sg.copybreak;
390 msg->sg.copybreak += copy;
403 msg->sg.copybreak = 0;
405 } while (i != msg->sg.end);
407 msg->sg.curr = i;
412 /* Receive sk_msg from psock->ingress_msg to @msg. */
413 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
416 struct iov_iter *iter = &msg->msg_iter;
508 struct sk_msg *msg;
510 msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
511 if (unlikely(!msg))
513 sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
514 return msg;
533 struct sk_msg *msg,
542 num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
553 num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
562 msg->sg.start = 0;
563 msg->sg.size = copied;
564 msg->sg.end = num_sge;
565 msg->skb = take_ref ? skb_get(skb) : skb;
567 sk_psock_queue_msg(psock, msg);
579 struct sk_msg *msg;
588 msg = sk_psock_create_ingress_msg(sk, skb);
589 if (!msg)
599 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
601 kfree(msg);
612 struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
616 if (unlikely(!msg))
619 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
621 kfree(msg);
791 struct sk_msg *msg, *tmp;
793 list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
794 list_del(&msg->list);
795 if (!msg->skb)
796 atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
797 sk_msg_free(psock->sk, msg);
798 kfree(msg);
890 struct sk_msg *msg)
902 sk_msg_compute_data_pointers(msg);
903 msg->sk = sk;
904 ret = bpf_prog_run_pin_on_cpu(prog, msg);
905 ret = sk_psock_map_verd(ret, msg->sk_redir);
906 psock->apply_bytes = msg->apply_bytes;
912 if (!msg->sk_redir) {
916 psock->redir_ingress = sk_msg_to_ingress(msg);
917 psock->sk_redir = msg->sk_redir;
1017 * into the msg queue. If it's not empty we have to
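
The allocation, copy and trim helpers matched above are designed to be used together. Below is a minimal sketch, not a verbatim kernel caller, of how a sendmsg-style path (roughly what tcp_bpf_sendmsg does) could combine them: reserve scatterlist space with sk_msg_alloc(), fill it from the caller's iov_iter with the memcopy helper whose body is matched around source lines 369-407 (sk_msg_memcopy_from_iter() in the kernel tree), and roll back with sk_msg_trim() if the copy fails. The function name example_copy_from_user, the copy argument and the error handling are illustrative; locking and wait-for-memory handling are omitted.

#include <linux/skmsg.h>        /* struct sk_msg and the sk_msg_* helpers */
#include <linux/socket.h>       /* struct msghdr */
#include <net/sock.h>

static int example_copy_from_user(struct sock *sk, struct msghdr *m,
                                  struct sk_msg *msg, u32 copy)
{
        u32 osize = msg->sg.size;       /* size to trim back to on error */
        int err;

        /* Grow the scatterlist ring by 'copy' bytes, charged to sk.
         * Passing sg.end - 1 as elem_first_coalesce allows coalescing
         * into the last in-use element, mirroring what tcp_bpf_sendmsg
         * passes.
         */
        err = sk_msg_alloc(sk, msg, osize + copy, msg->sg.end - 1);
        if (err)
                return err;

        /* Fill the reserved elements from the caller's iov_iter. */
        err = sk_msg_memcopy_from_iter(sk, &m->msg_iter, msg, copy);
        if (err)
                sk_msg_trim(sk, msg, osize);    /* undo the reservation */

        return err;
}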
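
On the release side, the free helpers split along two axes: how much to free (the whole message vs. a byte prefix) and whether to uncharge the socket's memory accounting (the *_nocharge variants skip the uncharge). A small sketch, with illustrative names, of a caller that has just pushed 'sent' bytes of a queued message to the transport:

/* Release only what was transmitted; keep the remainder queued. */
static void example_consume(struct sock *sk, struct sk_msg *msg, u32 sent)
{
        if (sent < msg->sg.size)
                sk_msg_free_partial(sk, msg, sent);     /* free and uncharge the sent prefix */
        else
                sk_msg_free(sk, msg);   /* free every element and reinitialize msg */
}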
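
Finally, the ingress path: sk_psock_skb_ingress*() converts an skb into an sk_msg and queues it on psock->ingress_msg, and sk_msg_recvmsg() later drains that queue into a user msghdr. A rough sketch of a recvmsg-style consumer, assuming a psock is attached (this loosely mirrors tcp_bpf_recvmsg(); blocking, MSG_PEEK handling and fallback to the normal receive path are omitted, and example_recvmsg is an illustrative name):

static int example_recvmsg(struct sock *sk, struct msghdr *m, size_t len,
                           int flags)
{
        struct sk_psock *psock = sk_psock_get(sk);
        int copied;

        if (!psock)
                return -EINVAL; /* illustrative: a real caller would fall
                                 * back to the protocol's regular receive
                                 * path here
                                 */

        /* Copy queued ingress sk_msg data into the user's iov_iter. */
        copied = sk_msg_recvmsg(sk, psock, m, len, flags);

        sk_psock_put(sk, psock);
        return copied;
}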