xref: /linux/net/core/skmsg.c (revision 4c0a42c50021ee509f159c1f8a22efb35987c941)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3 
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7 
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11 #include <trace/events/sock.h>
12 
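/* Coalescing is only allowed into an element at or after elem_first_coalesce.
 * The sg ring wraps at NR_MSG_FRAG_IDS, so "end" may be numerically smaller
 * than "start"; both branches check that the last occupied element (the one
 * before "end") sits at or after elem_first_coalesce in ring order.
 */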
13 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
14 {
15 	if (msg->sg.end > msg->sg.start &&
16 	    elem_first_coalesce < msg->sg.end)
17 		return true;
18 
19 	if (msg->sg.end < msg->sg.start &&
20 	    (elem_first_coalesce > msg->sg.start ||
21 	     elem_first_coalesce < msg->sg.end))
22 		return true;
23 
24 	return false;
25 }
26 
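/* Grow @msg until it holds @len bytes of sg space, allocating from the
 * socket's page frag. New space is coalesced into the last element when
 * sk_msg_try_coalesce_ok() allows it, otherwise a fresh element is added.
 * On memory failure the msg is trimmed back to its original size and
 * -ENOMEM is returned; -ENOSPC means the sg ring is full (space allocated
 * so far is kept).
 */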
27 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
28 		 int elem_first_coalesce)
29 {
30 	struct page_frag *pfrag = sk_page_frag(sk);
31 	u32 osize = msg->sg.size;
32 	int ret = 0;
33 
34 	len -= msg->sg.size;
35 	while (len > 0) {
36 		struct scatterlist *sge;
37 		u32 orig_offset;
38 		int use, i;
39 
40 		if (!sk_page_frag_refill(sk, pfrag)) {
41 			ret = -ENOMEM;
42 			goto msg_trim;
43 		}
44 
45 		orig_offset = pfrag->offset;
46 		use = min_t(int, len, pfrag->size - orig_offset);
47 		if (!sk_wmem_schedule(sk, use)) {
48 			ret = -ENOMEM;
49 			goto msg_trim;
50 		}
51 
52 		i = msg->sg.end;
53 		sk_msg_iter_var_prev(i);
54 		sge = &msg->sg.data[i];
55 
56 		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
57 		    sg_page(sge) == pfrag->page &&
58 		    sge->offset + sge->length == orig_offset) {
59 			sge->length += use;
60 		} else {
61 			if (sk_msg_full(msg)) {
62 				ret = -ENOSPC;
63 				break;
64 			}
65 
66 			sge = &msg->sg.data[msg->sg.end];
67 			sg_unmark_end(sge);
68 			sg_set_page(sge, pfrag->page, use, orig_offset);
69 			get_page(pfrag->page);
70 			sk_msg_iter_next(msg, end);
71 		}
72 
73 		sk_mem_charge(sk, use);
74 		msg->sg.size += use;
75 		pfrag->offset += use;
76 		len -= use;
77 	}
78 
79 	return ret;
80 
81 msg_trim:
82 	sk_msg_trim(sk, msg, osize);
83 	return ret;
84 }
85 EXPORT_SYMBOL_GPL(sk_msg_alloc);
86 
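/* Clone @len bytes starting at @off from @src into @dst by referencing the
 * same pages (no data copy). A region contiguous with the previous dst
 * element is merged into it, otherwise a new element is added. The cloned
 * bytes are charged to @sk. Returns -ENOSPC when @src runs out of data or
 * @dst runs out of sg slots.
 */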
87 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
88 		 u32 off, u32 len)
89 {
90 	int i = src->sg.start;
91 	struct scatterlist *sge = sk_msg_elem(src, i);
92 	struct scatterlist *sgd = NULL;
93 	u32 sge_len, sge_off;
94 
95 	while (off) {
96 		if (sge->length > off)
97 			break;
98 		off -= sge->length;
99 		sk_msg_iter_var_next(i);
100 		if (i == src->sg.end && off)
101 			return -ENOSPC;
102 		sge = sk_msg_elem(src, i);
103 	}
104 
105 	while (len) {
106 		sge_len = sge->length - off;
107 		if (sge_len > len)
108 			sge_len = len;
109 
110 		if (dst->sg.end)
111 			sgd = sk_msg_elem(dst, dst->sg.end - 1);
112 
113 		if (sgd &&
114 		    (sg_page(sge) == sg_page(sgd)) &&
115 		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
116 			sgd->length += sge_len;
117 			dst->sg.size += sge_len;
118 		} else if (!sk_msg_full(dst)) {
119 			sge_off = sge->offset + off;
120 			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
121 		} else {
122 			return -ENOSPC;
123 		}
124 
125 		off = 0;
126 		len -= sge_len;
127 		sk_mem_charge(sk, sge_len);
128 		sk_msg_iter_var_next(i);
129 		if (i == src->sg.end && len)
130 			return -ENOSPC;
131 		sge = sk_msg_elem(src, i);
132 	}
133 
134 	return 0;
135 }
136 EXPORT_SYMBOL_GPL(sk_msg_clone);
137 
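/* Uncharge up to @bytes from the front of @msg without releasing pages.
 * Fully consumed elements are zeroed and sg.start is advanced past them;
 * a partially consumed element keeps its page with offset/length adjusted.
 */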
138 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
139 {
140 	int i = msg->sg.start;
141 
142 	do {
143 		struct scatterlist *sge = sk_msg_elem(msg, i);
144 
145 		if (bytes < sge->length) {
146 			sge->length -= bytes;
147 			sge->offset += bytes;
148 			sk_mem_uncharge(sk, bytes);
149 			break;
150 		}
151 
152 		sk_mem_uncharge(sk, sge->length);
153 		bytes -= sge->length;
154 		sge->length = 0;
155 		sge->offset = 0;
156 		sk_msg_iter_var_next(i);
157 	} while (bytes && i != msg->sg.end);
158 	msg->sg.start = i;
159 }
160 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
161 
162 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
163 {
164 	int i = msg->sg.start;
165 
166 	do {
167 		struct scatterlist *sge = &msg->sg.data[i];
168 		int uncharge = (bytes < sge->length) ? bytes : sge->length;
169 
170 		sk_mem_uncharge(sk, uncharge);
171 		bytes -= uncharge;
172 		sk_msg_iter_var_next(i);
173 	} while (i != msg->sg.end);
174 }
175 EXPORT_SYMBOL_GPL(sk_msg_return);
176 
177 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
178 			    bool charge)
179 {
180 	struct scatterlist *sge = sk_msg_elem(msg, i);
181 	u32 len = sge->length;
182 
183 	/* When the skb owns the memory we free it from the consume_skb path. */
184 	if (!msg->skb) {
185 		if (charge)
186 			sk_mem_uncharge(sk, len);
187 		put_page(sg_page(sge));
188 	}
189 	memset(sge, 0, sizeof(*sge));
190 	return len;
191 }
192 
193 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
194 			 bool charge)
195 {
196 	struct scatterlist *sge = sk_msg_elem(msg, i);
197 	int freed = 0;
198 
199 	while (msg->sg.size) {
200 		msg->sg.size -= sge->length;
201 		freed += sk_msg_free_elem(sk, msg, i, charge);
202 		sk_msg_iter_var_next(i);
203 		sk_msg_check_to_free(msg, i, msg->sg.size);
204 		sge = sk_msg_elem(msg, i);
205 	}
206 	consume_skb(msg->skb);
207 	sk_msg_init(msg);
208 	return freed;
209 }
210 
211 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
212 {
213 	return __sk_msg_free(sk, msg, msg->sg.start, false);
214 }
215 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
216 
217 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
218 {
219 	return __sk_msg_free(sk, msg, msg->sg.start, true);
220 }
221 EXPORT_SYMBOL_GPL(sk_msg_free);
222 
223 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
224 				  u32 bytes, bool charge)
225 {
226 	struct scatterlist *sge;
227 	u32 i = msg->sg.start;
228 
229 	while (bytes) {
230 		sge = sk_msg_elem(msg, i);
231 		if (!sge->length)
232 			break;
233 		if (bytes < sge->length) {
234 			if (charge)
235 				sk_mem_uncharge(sk, bytes);
236 			sge->length -= bytes;
237 			sge->offset += bytes;
238 			msg->sg.size -= bytes;
239 			break;
240 		}
241 
242 		msg->sg.size -= sge->length;
243 		bytes -= sge->length;
244 		sk_msg_free_elem(sk, msg, i, charge);
245 		sk_msg_iter_var_next(i);
246 		sk_msg_check_to_free(msg, i, bytes);
247 	}
248 	msg->sg.start = i;
249 }
250 
251 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
252 {
253 	__sk_msg_free_partial(sk, msg, bytes, true);
254 }
255 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
256 
257 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
258 				  u32 bytes)
259 {
260 	__sk_msg_free_partial(sk, msg, bytes, false);
261 }
262 
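/* Trim @msg down to @len bytes, freeing whole trailing sg elements and
 * shortening the final partial one. curr and copybreak are pulled back when
 * they end up past the new end so future copy operations start at a valid
 * location.
 */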
263 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
264 {
265 	int trim = msg->sg.size - len;
266 	u32 i = msg->sg.end;
267 
268 	if (trim <= 0) {
269 		WARN_ON(trim < 0);
270 		return;
271 	}
272 
273 	sk_msg_iter_var_prev(i);
274 	msg->sg.size = len;
275 	while (msg->sg.data[i].length &&
276 	       trim >= msg->sg.data[i].length) {
277 		trim -= msg->sg.data[i].length;
278 		sk_msg_free_elem(sk, msg, i, true);
279 		sk_msg_iter_var_prev(i);
280 		if (!trim)
281 			goto out;
282 	}
283 
284 	msg->sg.data[i].length -= trim;
285 	sk_mem_uncharge(sk, trim);
286 	/* Adjust copybreak if it falls into the trimmed part of the last buf */
287 	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
288 		msg->sg.copybreak = msg->sg.data[i].length;
289 out:
290 	sk_msg_iter_var_next(i);
291 	msg->sg.end = i;
292 
293 	/* If we trim data a full sg elem before the curr pointer, update
294 	 * copybreak and curr so that any future copy operations
295 	 * start at the new copy location.
296 	 * However, trimmed data that has not yet been used in a copy op
297 	 * does not require an update.
298 	 */
299 	if (!msg->sg.size) {
300 		msg->sg.curr = msg->sg.start;
301 		msg->sg.copybreak = 0;
302 	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
303 		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
304 		sk_msg_iter_var_prev(i);
305 		msg->sg.curr = i;
306 		msg->sg.copybreak = msg->sg.data[i].length;
307 	}
308 }
309 EXPORT_SYMBOL_GPL(sk_msg_trim);
310 
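/* Pin user pages from @from and link them into @msg without copying.
 * Returns -EFAULT when the sg ring fills up or the iterator cannot provide
 * pages; on error the iterator is reverted so the caller can fall back to a
 * copying path (the partially built msg still needs a trim by the caller).
 */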
311 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
312 			      struct sk_msg *msg, u32 bytes)
313 {
314 	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
315 	const int to_max_pages = MAX_MSG_FRAGS;
316 	struct page *pages[MAX_MSG_FRAGS];
317 	ssize_t orig, copied, use, offset;
318 
319 	orig = msg->sg.size;
320 	while (bytes > 0) {
321 		i = 0;
322 		maxpages = to_max_pages - num_elems;
323 		if (maxpages == 0) {
324 			ret = -EFAULT;
325 			goto out;
326 		}
327 
328 		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
329 					    &offset);
330 		if (copied <= 0) {
331 			ret = -EFAULT;
332 			goto out;
333 		}
334 
335 		bytes -= copied;
336 		msg->sg.size += copied;
337 
338 		while (copied) {
339 			use = min_t(int, copied, PAGE_SIZE - offset);
340 			sg_set_page(&msg->sg.data[msg->sg.end],
341 				    pages[i], use, offset);
342 			sg_unmark_end(&msg->sg.data[msg->sg.end]);
343 			sk_mem_charge(sk, use);
344 
345 			offset = 0;
346 			copied -= use;
347 			sk_msg_iter_next(msg, end);
348 			num_elems++;
349 			i++;
350 		}
351 		/* When zerocopy is mixed with sk_msg_*copy* operations we
352 		 * may have a copybreak set. In that case clear it and prefer
353 		 * the zerocopy remainder when possible.
354 		 */
355 		msg->sg.copybreak = 0;
356 		msg->sg.curr = msg->sg.end;
357 	}
358 out:
359 	/* Revert iov_iter updates; the msg will need a later 'trim' if it
360 	 * also needs to be cleared.
361 	 */
362 	if (ret)
363 		iov_iter_revert(from, msg->sg.size - orig);
364 	return ret;
365 }
366 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
367 
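/* Copy up to @bytes from @from into the sg elements of @msg, starting at
 * sg.curr/copybreak and advancing them as data is written. Returns the
 * number of bytes copied, or a negative error (-ENOSPC when msg space is
 * exhausted, -EFAULT on a failed copy).
 */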
368 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
369 			     struct sk_msg *msg, u32 bytes)
370 {
371 	int ret = -ENOSPC, i = msg->sg.curr;
372 	u32 copy, buf_size, copied = 0;
373 	struct scatterlist *sge;
374 	void *to;
375 
376 	do {
377 		sge = sk_msg_elem(msg, i);
378 		/* This is possible if a trim operation shrunk the buffer */
379 		if (msg->sg.copybreak >= sge->length) {
380 			msg->sg.copybreak = 0;
381 			sk_msg_iter_var_next(i);
382 			if (i == msg->sg.end)
383 				break;
384 			sge = sk_msg_elem(msg, i);
385 		}
386 
387 		buf_size = sge->length - msg->sg.copybreak;
388 		copy = (buf_size > bytes) ? bytes : buf_size;
389 		to = sg_virt(sge) + msg->sg.copybreak;
390 		msg->sg.copybreak += copy;
391 		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
392 			ret = copy_from_iter_nocache(to, copy, from);
393 		else
394 			ret = copy_from_iter(to, copy, from);
395 		if (ret != copy) {
396 			ret = -EFAULT;
397 			goto out;
398 		}
399 		bytes -= copy;
400 		copied += copy;
401 		if (!bytes)
402 			break;
403 		msg->sg.copybreak = 0;
404 		sk_msg_iter_var_next(i);
405 	} while (i != msg->sg.end);
406 out:
407 	msg->sg.curr = i;
408 	return (ret < 0) ? ret : copied;
409 }
410 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
411 
412 /* Receive sk_msg from psock->ingress_msg to @msg. */
413 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
414 		   int len, int flags)
415 {
416 	struct iov_iter *iter = &msg->msg_iter;
417 	int peek = flags & MSG_PEEK;
418 	struct sk_msg *msg_rx;
419 	int i, copied = 0;
420 
421 	msg_rx = sk_psock_peek_msg(psock);
422 	while (copied != len) {
423 		struct scatterlist *sge;
424 
425 		if (unlikely(!msg_rx))
426 			break;
427 
428 		i = msg_rx->sg.start;
429 		do {
430 			struct page *page;
431 			int copy;
432 
433 			sge = sk_msg_elem(msg_rx, i);
434 			copy = sge->length;
435 			page = sg_page(sge);
436 			if (copied + copy > len)
437 				copy = len - copied;
438 			if (copy)
439 				copy = copy_page_to_iter(page, sge->offset, copy, iter);
440 			if (!copy) {
441 				copied = copied ? copied : -EFAULT;
442 				goto out;
443 			}
444 
445 			copied += copy;
446 			if (likely(!peek)) {
447 				sge->offset += copy;
448 				sge->length -= copy;
449 				if (!msg_rx->skb) {
450 					sk_mem_uncharge(sk, copy);
451 					atomic_sub(copy, &sk->sk_rmem_alloc);
452 				}
453 				msg_rx->sg.size -= copy;
454 
455 				if (!sge->length) {
456 					sk_msg_iter_var_next(i);
457 					if (!msg_rx->skb)
458 						put_page(page);
459 				}
460 			} else {
461 				/* Let's not optimize the peek case: if copy_page_to_iter
462 				 * didn't copy the entire length, just break.
463 				 */
464 				if (copy != sge->length)
465 					goto out;
466 				sk_msg_iter_var_next(i);
467 			}
468 
469 			if (copied == len)
470 				break;
471 		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
472 
473 		if (unlikely(peek)) {
474 			msg_rx = sk_psock_next_msg(psock, msg_rx);
475 			if (!msg_rx)
476 				break;
477 			continue;
478 		}
479 
480 		msg_rx->sg.start = i;
481 		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
482 			msg_rx = sk_psock_dequeue_msg(psock);
483 			kfree_sk_msg(msg_rx);
484 		}
485 		msg_rx = sk_psock_peek_msg(psock);
486 	}
487 out:
488 	return copied;
489 }
490 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
491 
492 bool sk_msg_is_readable(struct sock *sk)
493 {
494 	struct sk_psock *psock;
495 	bool empty = true;
496 
497 	rcu_read_lock();
498 	psock = sk_psock(sk);
499 	if (likely(psock))
500 		empty = list_empty(&psock->ingress_msg);
501 	rcu_read_unlock();
502 	return !empty;
503 }
504 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
505 
506 static struct sk_msg *alloc_sk_msg(gfp_t gfp)
507 {
508 	struct sk_msg *msg;
509 
510 	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
511 	if (unlikely(!msg))
512 		return NULL;
513 	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
514 	return msg;
515 }
516 
517 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
518 						  struct sk_buff *skb)
519 {
520 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
521 		return NULL;
522 
523 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
524 		return NULL;
525 
526 	return alloc_sk_msg(GFP_KERNEL);
527 }
528 
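/* Map @len bytes of @skb starting at @off into @msg's sg ring and queue the
 * msg on psock->ingress_msg, waking the socket. When @take_ref is set an
 * extra skb reference is held by the msg. Returns the number of bytes
 * queued or a negative error.
 */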
529 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
530 					u32 off, u32 len,
531 					struct sk_psock *psock,
532 					struct sock *sk,
533 					struct sk_msg *msg,
534 					bool take_ref)
535 {
536 	int num_sge, copied;
537 
538 	/* skb_to_sgvec will fail when the total number of fragments in
539 	 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
540 	 * caller may aggregate multiple skbs.
541 	 */
542 	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
543 	if (num_sge < 0) {
544 		/* skb_linearize may fail with ENOMEM, but let's simply try again
545 		 * later if this happens. Under memory pressure we don't want to
546 		 * drop the skb. We need to linearize the skb so that the mapping
547 		 * in skb_to_sgvec cannot error.
548 		 * Note that skb_linearize requires the skb not to be shared.
549 		 */
550 		if (skb_linearize(skb))
551 			return -EAGAIN;
552 
553 		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
554 		if (unlikely(num_sge < 0))
555 			return num_sge;
556 	}
557 
558 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
559 	psock->ingress_bytes += len;
560 #endif
561 	copied = len;
562 	msg->sg.start = 0;
563 	msg->sg.size = copied;
564 	msg->sg.end = num_sge;
565 	msg->skb = take_ref ? skb_get(skb) : skb;
566 
567 	sk_psock_queue_msg(psock, msg);
568 	sk_psock_data_ready(sk, psock);
569 	return copied;
570 }
571 
572 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
573 				     u32 off, u32 len, bool take_ref);
574 
575 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
576 				u32 off, u32 len)
577 {
578 	struct sock *sk = psock->sk;
579 	struct sk_msg *msg;
580 	int err;
581 
582 	/* If we are receiving on the same sock, skb->sk is already assigned;
583 	 * skip memory accounting and the owner transition since they are
584 	 * already set correctly.
585 	 */
586 	if (unlikely(skb->sk == sk))
587 		return sk_psock_skb_ingress_self(psock, skb, off, len, true);
588 	msg = sk_psock_create_ingress_msg(sk, skb);
589 	if (!msg)
590 		return -EAGAIN;
591 
592 	/* This will transition ownership of the data from the socket where
593 	 * the BPF program was run initiating the redirect to the socket
594 	 * we will eventually receive this data on. The data will be released
595 	 * from consume_skb() in __tcp_bpf_recvmsg() after it has been copied
596 	 * into user buffers.
597 	 */
598 	skb_set_owner_r(skb, sk);
599 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
600 	if (err < 0)
601 		kfree(msg);
602 	return err;
603 }
604 
605 /* Puts an skb on the ingress queue of the socket already assigned to the
606  * skb. In this case we do not need to check memory limits or skb_set_owner_r
607  * because the skb is already accounted for here.
608  */
609 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
610 				     u32 off, u32 len, bool take_ref)
611 {
612 	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
613 	struct sock *sk = psock->sk;
614 	int err;
615 
616 	if (unlikely(!msg))
617 		return -EAGAIN;
618 	skb_set_owner_r(skb, sk);
619 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
620 	if (err < 0)
621 		kfree(msg);
622 	return err;
623 }
624 
625 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
626 			       u32 off, u32 len, bool ingress)
627 {
628 	if (!ingress) {
629 		if (!sock_writeable(psock->sk))
630 			return -EAGAIN;
631 		return skb_send_sock(psock->sk, skb, off, len);
632 	}
633 
634 	return sk_psock_skb_ingress(psock, skb, off, len);
635 }
636 
637 static void sk_psock_skb_state(struct sk_psock *psock,
638 			       struct sk_psock_work_state *state,
639 			       int len, int off)
640 {
641 	spin_lock_bh(&psock->ingress_lock);
642 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
643 		state->len = len;
644 		state->off = off;
645 	}
646 	spin_unlock_bh(&psock->ingress_lock);
647 }
648 
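/* Workqueue handler that drains psock->ingress_skb. Each skb is either
 * placed on the local ingress msg list or transmitted via skb_send_sock(),
 * depending on the verdict's ingress flag. Partial progress is stashed in
 * psock->work_state so an -EAGAIN can be retried later; hard errors report
 * an error on the socket and disable TX.
 */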
649 static void sk_psock_backlog(struct work_struct *work)
650 {
651 	struct delayed_work *dwork = to_delayed_work(work);
652 	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
653 	struct sk_psock_work_state *state = &psock->work_state;
654 	struct sk_buff *skb = NULL;
655 	u32 len = 0, off = 0;
656 	bool ingress;
657 	int ret;
658 
659 	mutex_lock(&psock->work_mutex);
660 	while ((skb = skb_peek(&psock->ingress_skb))) {
661 		len = skb->len;
662 		off = 0;
663 		if (skb_bpf_strparser(skb)) {
664 			struct strp_msg *stm = strp_msg(skb);
665 
666 			off = stm->offset;
667 			len = stm->full_len;
668 		}
669 
670 		/* Resume processing from previous partial state */
671 		if (unlikely(state->len)) {
672 			len = state->len;
673 			off = state->off;
674 		}
675 
676 		ingress = skb_bpf_ingress(skb);
677 		skb_bpf_redirect_clear(skb);
678 		do {
679 			ret = -EIO;
680 			if (!sock_flag(psock->sk, SOCK_DEAD))
681 				ret = sk_psock_handle_skb(psock, skb, off,
682 							  len, ingress);
683 			if (ret <= 0) {
684 				if (ret == -EAGAIN) {
685 					sk_psock_skb_state(psock, state, len, off);
686 					/* Restore redir info we cleared before */
687 					skb_bpf_set_redir(skb, psock->sk, ingress);
688 					/* Delay slightly to prioritize any
689 					 * other work that might be here.
690 					 */
691 					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
692 						schedule_delayed_work(&psock->work, 1);
693 					goto end;
694 				}
695 				/* Hard errors break pipe and stop xmit. */
696 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
697 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
698 				goto end;
699 			}
700 			off += ret;
701 			len -= ret;
702 		} while (len);
703 
704 		/* The entire skb was sent, clear the state */
705 		sk_psock_skb_state(psock, state, 0, 0);
706 		skb = skb_dequeue(&psock->ingress_skb);
707 		kfree_skb(skb);
708 	}
709 end:
710 	mutex_unlock(&psock->work_mutex);
711 }
712 
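/* Allocate and attach a psock to @sk. Fails with -EINVAL when a ULP is
 * already in use on an inet socket and -EBUSY when sk_user_data is already
 * taken. The original proto callbacks are saved so they can be restored
 * when the psock is dropped. Returns the psock or an ERR_PTR.
 */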
713 struct sk_psock *sk_psock_init(struct sock *sk, int node)
714 {
715 	struct sk_psock *psock;
716 	struct proto *prot;
717 
718 	write_lock_bh(&sk->sk_callback_lock);
719 
720 	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
721 		psock = ERR_PTR(-EINVAL);
722 		goto out;
723 	}
724 
725 	if (sk->sk_user_data) {
726 		psock = ERR_PTR(-EBUSY);
727 		goto out;
728 	}
729 
730 	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
731 	if (!psock) {
732 		psock = ERR_PTR(-ENOMEM);
733 		goto out;
734 	}
735 
736 	prot = READ_ONCE(sk->sk_prot);
737 	psock->sk = sk;
738 	psock->eval = __SK_NONE;
739 	psock->sk_proto = prot;
740 	psock->saved_unhash = prot->unhash;
741 	psock->saved_destroy = prot->destroy;
742 	psock->saved_close = prot->close;
743 	psock->saved_write_space = sk->sk_write_space;
744 
745 	INIT_LIST_HEAD(&psock->link);
746 	spin_lock_init(&psock->link_lock);
747 
748 	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
749 	mutex_init(&psock->work_mutex);
750 	INIT_LIST_HEAD(&psock->ingress_msg);
751 	spin_lock_init(&psock->ingress_lock);
752 	skb_queue_head_init(&psock->ingress_skb);
753 
754 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
755 	refcount_set(&psock->refcnt, 1);
756 
757 	__rcu_assign_sk_user_data_with_flags(sk, psock,
758 					     SK_USER_DATA_NOCOPY |
759 					     SK_USER_DATA_PSOCK);
760 	sock_hold(sk);
761 
762 out:
763 	write_unlock_bh(&sk->sk_callback_lock);
764 	return psock;
765 }
766 EXPORT_SYMBOL_GPL(sk_psock_init);
767 
768 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
769 {
770 	struct sk_psock_link *link;
771 
772 	spin_lock_bh(&psock->link_lock);
773 	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
774 					list);
775 	if (link)
776 		list_del(&link->list);
777 	spin_unlock_bh(&psock->link_lock);
778 	return link;
779 }
780 
781 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
782 {
783 	struct sk_msg *msg, *tmp;
784 
785 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
786 		list_del(&msg->list);
787 		if (!msg->skb)
788 			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
789 		sk_msg_free(psock->sk, msg);
790 		kfree(msg);
791 	}
792 }
793 
794 static void __sk_psock_zap_ingress(struct sk_psock *psock)
795 {
796 	struct sk_buff *skb;
797 
798 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
799 		skb_bpf_redirect_clear(skb);
800 		sock_drop(psock->sk, skb);
801 	}
802 	__sk_psock_purge_ingress_msg(psock);
803 }
804 
805 static void sk_psock_link_destroy(struct sk_psock *psock)
806 {
807 	struct sk_psock_link *link, *tmp;
808 
809 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
810 		list_del(&link->list);
811 		sk_psock_free_link(link);
812 	}
813 }
814 
815 void sk_psock_stop(struct sk_psock *psock)
816 {
817 	spin_lock_bh(&psock->ingress_lock);
818 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
819 	sk_psock_cork_free(psock);
820 	spin_unlock_bh(&psock->ingress_lock);
821 }
822 
823 static void sk_psock_done_strp(struct sk_psock *psock);
824 
825 static void sk_psock_destroy(struct work_struct *work)
826 {
827 	struct sk_psock *psock = container_of(to_rcu_work(work),
828 					      struct sk_psock, rwork);
829 	/* No sk_callback_lock since already detached. */
830 
831 	sk_psock_done_strp(psock);
832 
833 	cancel_delayed_work_sync(&psock->work);
834 	__sk_psock_zap_ingress(psock);
835 	mutex_destroy(&psock->work_mutex);
836 
837 	psock_progs_drop(&psock->progs);
838 
839 	sk_psock_link_destroy(psock);
840 	sk_psock_cork_free(psock);
841 
842 	if (psock->sk_redir)
843 		sock_put(psock->sk_redir);
844 	if (psock->sk_pair)
845 		sock_put(psock->sk_pair);
846 	sock_put(psock->sk);
847 	kfree(psock);
848 }
849 
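/* Detach the psock from @sk: restore the original proto and data_ready
 * callbacks, stop ingress processing, and defer the final teardown to an
 * RCU work item so concurrent RCU readers of sk_user_data stay safe.
 */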
850 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
851 {
852 	write_lock_bh(&sk->sk_callback_lock);
853 	sk_psock_restore_proto(sk, psock);
854 	rcu_assign_sk_user_data(sk, NULL);
855 	if (psock->progs.stream_parser)
856 		sk_psock_stop_strp(sk, psock);
857 	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
858 		sk_psock_stop_verdict(sk, psock);
859 	write_unlock_bh(&sk->sk_callback_lock);
860 
861 	sk_psock_stop(psock);
862 
863 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
864 	queue_rcu_work(system_wq, &psock->rwork);
865 }
866 EXPORT_SYMBOL_GPL(sk_psock_drop);
867 
868 static int sk_psock_map_verd(int verdict, bool redir)
869 {
870 	switch (verdict) {
871 	case SK_PASS:
872 		return redir ? __SK_REDIRECT : __SK_PASS;
873 	case SK_DROP:
874 	default:
875 		break;
876 	}
877 
878 	return __SK_DROP;
879 }
880 
881 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
882 			 struct sk_msg *msg)
883 {
884 	struct bpf_prog *prog;
885 	int ret;
886 
887 	rcu_read_lock();
888 	prog = READ_ONCE(psock->progs.msg_parser);
889 	if (unlikely(!prog)) {
890 		ret = __SK_PASS;
891 		goto out;
892 	}
893 
894 	sk_msg_compute_data_pointers(msg);
895 	msg->sk = sk;
896 	ret = bpf_prog_run_pin_on_cpu(prog, msg);
897 	ret = sk_psock_map_verd(ret, msg->sk_redir);
898 	psock->apply_bytes = msg->apply_bytes;
899 	if (ret == __SK_REDIRECT) {
900 		if (psock->sk_redir) {
901 			sock_put(psock->sk_redir);
902 			psock->sk_redir = NULL;
903 		}
904 		if (!msg->sk_redir) {
905 			ret = __SK_DROP;
906 			goto out;
907 		}
908 		psock->redir_ingress = sk_msg_to_ingress(msg);
909 		psock->sk_redir = msg->sk_redir;
910 		sock_hold(psock->sk_redir);
911 	}
912 out:
913 	rcu_read_unlock();
914 	return ret;
915 }
916 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
917 
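/* Queue @skb on the ingress queue of the socket selected by the BPF
 * redirect and kick its backlog work. Drops the skb and returns -EIO if no
 * redirect target was set or the target psock is gone, dead, or disabled.
 */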
918 static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
919 {
920 	struct sk_psock *psock_other;
921 	struct sock *sk_other;
922 
923 	sk_other = skb_bpf_redirect_fetch(skb);
924 	/* This error indicates a buggy BPF program: it returned a redirect
925 	 * return code, but then didn't set a redirect target socket.
926 	 */
927 	if (unlikely(!sk_other)) {
928 		skb_bpf_redirect_clear(skb);
929 		sock_drop(from->sk, skb);
930 		return -EIO;
931 	}
932 	psock_other = sk_psock(sk_other);
933 	/* This error indicates the socket is being torn down or had another
934 	 * error that caused the pipe to break. We can't send a packet on
935 	 * a socket that is in this state so we drop the skb.
936 	 */
937 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
938 		skb_bpf_redirect_clear(skb);
939 		sock_drop(from->sk, skb);
940 		return -EIO;
941 	}
942 	spin_lock_bh(&psock_other->ingress_lock);
943 	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
944 		spin_unlock_bh(&psock_other->ingress_lock);
945 		skb_bpf_redirect_clear(skb);
946 		sock_drop(from->sk, skb);
947 		return -EIO;
948 	}
949 
950 	skb_queue_tail(&psock_other->ingress_skb, skb);
951 	schedule_delayed_work(&psock_other->work, 0);
952 	spin_unlock_bh(&psock_other->ingress_lock);
953 	return 0;
954 }
955 
956 static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
957 				       struct sk_psock *from, int verdict)
958 {
959 	switch (verdict) {
960 	case __SK_REDIRECT:
961 		sk_psock_skb_redirect(from, skb);
962 		break;
963 	case __SK_PASS:
964 	case __SK_DROP:
965 	default:
966 		break;
967 	}
968 }
969 
970 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
971 {
972 	struct bpf_prog *prog;
973 	int ret = __SK_PASS;
974 
975 	rcu_read_lock();
976 	prog = READ_ONCE(psock->progs.stream_verdict);
977 	if (likely(prog)) {
978 		skb->sk = psock->sk;
979 		skb_dst_drop(skb);
980 		skb_bpf_redirect_clear(skb);
981 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
982 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
983 		skb->sk = NULL;
984 	}
985 	sk_psock_tls_verdict_apply(skb, psock, ret);
986 	rcu_read_unlock();
987 	return ret;
988 }
989 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
990 
991 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
992 				  int verdict)
993 {
994 	struct sock *sk_other;
995 	int err = 0;
996 	u32 len, off;
997 
998 	switch (verdict) {
999 	case __SK_PASS:
1000 		err = -EIO;
1001 		sk_other = psock->sk;
1002 		if (sock_flag(sk_other, SOCK_DEAD) ||
1003 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1004 			goto out_free;
1005 
1006 		skb_bpf_set_ingress(skb);
1007 
1008 		/* If the queue is empty then we can submit directly
1009 		 * into the msg queue. If it's not empty we have to
1010 		 * queue work, otherwise we may get out-of-order data.
1011 		 * Any sk_psock_skb_ingress errors will be handled by
1012 		 * retrying later from the workqueue.
1013 		 */
1014 		if (skb_queue_empty(&psock->ingress_skb)) {
1015 			len = skb->len;
1016 			off = 0;
1017 			if (skb_bpf_strparser(skb)) {
1018 				struct strp_msg *stm = strp_msg(skb);
1019 
1020 				off = stm->offset;
1021 				len = stm->full_len;
1022 			}
1023 			err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
1024 		}
1025 		if (err < 0) {
1026 			spin_lock_bh(&psock->ingress_lock);
1027 			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1028 				skb_queue_tail(&psock->ingress_skb, skb);
1029 				schedule_delayed_work(&psock->work, 0);
1030 				err = 0;
1031 			}
1032 			spin_unlock_bh(&psock->ingress_lock);
1033 			if (err < 0)
1034 				goto out_free;
1035 		}
1036 		break;
1037 	case __SK_REDIRECT:
1038 		tcp_eat_skb(psock->sk, skb);
1039 		err = sk_psock_skb_redirect(psock, skb);
1040 		break;
1041 	case __SK_DROP:
1042 	default:
1043 out_free:
1044 		skb_bpf_redirect_clear(skb);
1045 		tcp_eat_skb(psock->sk, skb);
1046 		sock_drop(psock->sk, skb);
1047 	}
1048 
1049 	return err;
1050 }
1051 
1052 static void sk_psock_write_space(struct sock *sk)
1053 {
1054 	struct sk_psock *psock;
1055 	void (*write_space)(struct sock *sk) = NULL;
1056 
1057 	rcu_read_lock();
1058 	psock = sk_psock(sk);
1059 	if (likely(psock)) {
1060 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1061 			schedule_delayed_work(&psock->work, 0);
1062 		write_space = psock->saved_write_space;
1063 	}
1064 	rcu_read_unlock();
1065 	if (write_space)
1066 		write_space(sk);
1067 }
1068 
1069 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1070 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1071 {
1072 	struct sk_psock *psock;
1073 	struct bpf_prog *prog;
1074 	int ret = __SK_DROP;
1075 	struct sock *sk;
1076 
1077 	rcu_read_lock();
1078 	sk = strp->sk;
1079 	psock = sk_psock(sk);
1080 	if (unlikely(!psock)) {
1081 		sock_drop(sk, skb);
1082 		goto out;
1083 	}
1084 	prog = READ_ONCE(psock->progs.stream_verdict);
1085 	if (likely(prog)) {
1086 		skb->sk = sk;
1087 		skb_dst_drop(skb);
1088 		skb_bpf_redirect_clear(skb);
1089 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1090 		skb_bpf_set_strparser(skb);
1091 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1092 		skb->sk = NULL;
1093 	}
1094 	sk_psock_verdict_apply(psock, skb, ret);
1095 out:
1096 	rcu_read_unlock();
1097 }
1098 
1099 static int sk_psock_strp_read_done(struct strparser *strp, int err)
1100 {
1101 	return err;
1102 }
1103 
1104 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1105 {
1106 	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1107 	struct bpf_prog *prog;
1108 	int ret = skb->len;
1109 
1110 	rcu_read_lock();
1111 	prog = READ_ONCE(psock->progs.stream_parser);
1112 	if (likely(prog)) {
1113 		skb->sk = psock->sk;
1114 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1115 		skb->sk = NULL;
1116 	}
1117 	rcu_read_unlock();
1118 	return ret;
1119 }
1120 
1121 /* Called with socket lock held. */
1122 static void sk_psock_strp_data_ready(struct sock *sk)
1123 {
1124 	struct sk_psock *psock;
1125 
1126 	trace_sk_data_ready(sk);
1127 
1128 	rcu_read_lock();
1129 	psock = sk_psock(sk);
1130 	if (likely(psock)) {
1131 		if (tls_sw_has_ctx_rx(sk)) {
1132 			psock->saved_data_ready(sk);
1133 		} else {
1134 			read_lock_bh(&sk->sk_callback_lock);
1135 			strp_data_ready(&psock->strp);
1136 			read_unlock_bh(&sk->sk_callback_lock);
1137 		}
1138 	}
1139 	rcu_read_unlock();
1140 }
1141 
1142 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1143 {
1144 	int ret;
1145 
1146 	static const struct strp_callbacks cb = {
1147 		.rcv_msg	= sk_psock_strp_read,
1148 		.read_sock_done	= sk_psock_strp_read_done,
1149 		.parse_msg	= sk_psock_strp_parse,
1150 	};
1151 
1152 	ret = strp_init(&psock->strp, sk, &cb);
1153 	if (!ret)
1154 		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1155 
1156 	if (sk_is_tcp(sk)) {
1157 		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
1158 		psock->copied_seq = tcp_sk(sk)->copied_seq;
1159 	}
1160 	return ret;
1161 }
1162 
1163 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1164 {
1165 	if (psock->saved_data_ready)
1166 		return;
1167 
1168 	psock->saved_data_ready = sk->sk_data_ready;
1169 	sk->sk_data_ready = sk_psock_strp_data_ready;
1170 	sk->sk_write_space = sk_psock_write_space;
1171 }
1172 
1173 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1174 {
1175 	psock_set_prog(&psock->progs.stream_parser, NULL);
1176 
1177 	if (!psock->saved_data_ready)
1178 		return;
1179 
1180 	sk->sk_data_ready = psock->saved_data_ready;
1181 	psock->saved_data_ready = NULL;
1182 	strp_stop(&psock->strp);
1183 }
1184 
1185 static void sk_psock_done_strp(struct sk_psock *psock)
1186 {
1187 	/* Parser has been stopped */
1188 	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1189 		strp_done(&psock->strp);
1190 }
1191 #else
1192 static void sk_psock_done_strp(struct sk_psock *psock)
1193 {
1194 }
1195 #endif /* CONFIG_BPF_STREAM_PARSER */
1196 
1197 static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1198 {
1199 	struct sk_psock *psock;
1200 	struct bpf_prog *prog;
1201 	int ret = __SK_DROP;
1202 	int len = skb->len;
1203 
1204 	rcu_read_lock();
1205 	psock = sk_psock(sk);
1206 	if (unlikely(!psock)) {
1207 		len = 0;
1208 		tcp_eat_skb(sk, skb);
1209 		sock_drop(sk, skb);
1210 		goto out;
1211 	}
1212 	prog = READ_ONCE(psock->progs.stream_verdict);
1213 	if (!prog)
1214 		prog = READ_ONCE(psock->progs.skb_verdict);
1215 	if (likely(prog)) {
1216 		skb_dst_drop(skb);
1217 		skb_bpf_redirect_clear(skb);
1218 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1219 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1220 	}
1221 	ret = sk_psock_verdict_apply(psock, skb, ret);
1222 	if (ret < 0)
1223 		len = ret;
1224 out:
1225 	rcu_read_unlock();
1226 	return len;
1227 }
1228 
1229 static void sk_psock_verdict_data_ready(struct sock *sk)
1230 {
1231 	struct socket *sock = sk->sk_socket;
1232 	const struct proto_ops *ops;
1233 	int copied;
1234 
1235 	trace_sk_data_ready(sk);
1236 
1237 	if (unlikely(!sock))
1238 		return;
1239 	ops = READ_ONCE(sock->ops);
1240 	if (!ops || !ops->read_skb)
1241 		return;
1242 	copied = ops->read_skb(sk, sk_psock_verdict_recv);
1243 	if (copied >= 0) {
1244 		struct sk_psock *psock;
1245 
1246 		rcu_read_lock();
1247 		psock = sk_psock(sk);
1248 		if (psock)
1249 			sk_psock_data_ready(sk, psock);
1250 		rcu_read_unlock();
1251 	}
1252 }
1253 
1254 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1255 {
1256 	if (psock->saved_data_ready)
1257 		return;
1258 
1259 	psock->saved_data_ready = sk->sk_data_ready;
1260 	sk->sk_data_ready = sk_psock_verdict_data_ready;
1261 	sk->sk_write_space = sk_psock_write_space;
1262 }
1263 
1264 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1265 {
1266 	psock_set_prog(&psock->progs.stream_verdict, NULL);
1267 	psock_set_prog(&psock->progs.skb_verdict, NULL);
1268 
1269 	if (!psock->saved_data_ready)
1270 		return;
1271 
1272 	sk->sk_data_ready = psock->saved_data_ready;
1273 	psock->saved_data_ready = NULL;
1274 }
1275