xref: /linux/net/core/skmsg.c (revision 9c707ba99f1b638e32724691b18fd1429e23b7f4)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3 
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7 
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11 #include <trace/events/sock.h>
12 
13 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
14 {
15 	if (msg->sg.end > msg->sg.start &&
16 	    elem_first_coalesce < msg->sg.end)
17 		return true;
18 
19 	if (msg->sg.end < msg->sg.start &&
20 	    (elem_first_coalesce > msg->sg.start ||
21 	     elem_first_coalesce < msg->sg.end))
22 		return true;
23 
24 	return false;
25 }
26 
27 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
28 		 int elem_first_coalesce)
29 {
30 	struct page_frag *pfrag = sk_page_frag(sk);
31 	u32 osize = msg->sg.size;
32 	int ret = 0;
33 
34 	len -= msg->sg.size;
35 	while (len > 0) {
36 		struct scatterlist *sge;
37 		u32 orig_offset;
38 		int use, i;
39 
40 		if (!sk_page_frag_refill(sk, pfrag)) {
41 			ret = -ENOMEM;
42 			goto msg_trim;
43 		}
44 
45 		orig_offset = pfrag->offset;
46 		use = min_t(int, len, pfrag->size - orig_offset);
47 		if (!sk_wmem_schedule(sk, use)) {
48 			ret = -ENOMEM;
49 			goto msg_trim;
50 		}
51 
52 		i = msg->sg.end;
53 		sk_msg_iter_var_prev(i);
54 		sge = &msg->sg.data[i];
55 
56 		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
57 		    sg_page(sge) == pfrag->page &&
58 		    sge->offset + sge->length == orig_offset) {
59 			sge->length += use;
60 		} else {
61 			if (sk_msg_full(msg)) {
62 				ret = -ENOSPC;
63 				break;
64 			}
65 
66 			sge = &msg->sg.data[msg->sg.end];
67 			sg_unmark_end(sge);
68 			sg_set_page(sge, pfrag->page, use, orig_offset);
69 			get_page(pfrag->page);
70 			sk_msg_iter_next(msg, end);
71 		}
72 
73 		sk_mem_charge(sk, use);
74 		msg->sg.size += use;
75 		pfrag->offset += use;
76 		len -= use;
77 	}
78 
79 	return ret;
80 
81 msg_trim:
82 	sk_msg_trim(sk, msg, osize);
83 	return ret;
84 }
85 EXPORT_SYMBOL_GPL(sk_msg_alloc);
86 
87 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
88 		 u32 off, u32 len)
89 {
90 	int i = src->sg.start;
91 	struct scatterlist *sge = sk_msg_elem(src, i);
92 	struct scatterlist *sgd = NULL;
93 	u32 sge_len, sge_off;
94 
95 	while (off) {
96 		if (sge->length > off)
97 			break;
98 		off -= sge->length;
99 		sk_msg_iter_var_next(i);
100 		if (i == src->sg.end && off)
101 			return -ENOSPC;
102 		sge = sk_msg_elem(src, i);
103 	}
104 
105 	while (len) {
106 		sge_len = sge->length - off;
107 		if (sge_len > len)
108 			sge_len = len;
109 
110 		if (dst->sg.end)
111 			sgd = sk_msg_elem(dst, dst->sg.end - 1);
112 
113 		if (sgd &&
114 		    (sg_page(sge) == sg_page(sgd)) &&
115 		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
116 			sgd->length += sge_len;
117 			dst->sg.size += sge_len;
118 		} else if (!sk_msg_full(dst)) {
119 			sge_off = sge->offset + off;
120 			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
121 		} else {
122 			return -ENOSPC;
123 		}
124 
125 		off = 0;
126 		len -= sge_len;
127 		sk_mem_charge(sk, sge_len);
128 		sk_msg_iter_var_next(i);
129 		if (i == src->sg.end && len)
130 			return -ENOSPC;
131 		sge = sk_msg_elem(src, i);
132 	}
133 
134 	return 0;
135 }
136 EXPORT_SYMBOL_GPL(sk_msg_clone);
137 
138 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
139 {
140 	int i = msg->sg.start;
141 
142 	do {
143 		struct scatterlist *sge = sk_msg_elem(msg, i);
144 
145 		if (bytes < sge->length) {
146 			sge->length -= bytes;
147 			sge->offset += bytes;
148 			sk_mem_uncharge(sk, bytes);
149 			break;
150 		}
151 
152 		sk_mem_uncharge(sk, sge->length);
153 		bytes -= sge->length;
154 		sge->length = 0;
155 		sge->offset = 0;
156 		sk_msg_iter_var_next(i);
157 	} while (bytes && i != msg->sg.end);
158 	msg->sg.start = i;
159 }
160 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
161 
162 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
163 {
164 	int i = msg->sg.start;
165 
166 	do {
167 		struct scatterlist *sge = &msg->sg.data[i];
168 		int uncharge = (bytes < sge->length) ? bytes : sge->length;
169 
170 		sk_mem_uncharge(sk, uncharge);
171 		bytes -= uncharge;
172 		sk_msg_iter_var_next(i);
173 	} while (i != msg->sg.end);
174 }
175 EXPORT_SYMBOL_GPL(sk_msg_return);
176 
177 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
178 			    bool charge)
179 {
180 	struct scatterlist *sge = sk_msg_elem(msg, i);
181 	u32 len = sge->length;
182 
183 	/* When the skb owns the memory we free it from consume_skb path. */
184 	if (!msg->skb) {
185 		if (charge)
186 			sk_mem_uncharge(sk, len);
187 		put_page(sg_page(sge));
188 	}
189 	memset(sge, 0, sizeof(*sge));
190 	return len;
191 }
192 
193 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
194 			 bool charge)
195 {
196 	struct scatterlist *sge = sk_msg_elem(msg, i);
197 	int freed = 0;
198 
199 	while (msg->sg.size) {
200 		msg->sg.size -= sge->length;
201 		freed += sk_msg_free_elem(sk, msg, i, charge);
202 		sk_msg_iter_var_next(i);
203 		sk_msg_check_to_free(msg, i, msg->sg.size);
204 		sge = sk_msg_elem(msg, i);
205 	}
206 	consume_skb(msg->skb);
207 	sk_msg_init(msg);
208 	return freed;
209 }
210 
211 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
212 {
213 	return __sk_msg_free(sk, msg, msg->sg.start, false);
214 }
215 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
216 
217 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
218 {
219 	return __sk_msg_free(sk, msg, msg->sg.start, true);
220 }
221 EXPORT_SYMBOL_GPL(sk_msg_free);
222 
223 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
224 				  u32 bytes, bool charge)
225 {
226 	struct scatterlist *sge;
227 	u32 i = msg->sg.start;
228 
229 	while (bytes) {
230 		sge = sk_msg_elem(msg, i);
231 		if (!sge->length)
232 			break;
233 		if (bytes < sge->length) {
234 			if (charge)
235 				sk_mem_uncharge(sk, bytes);
236 			sge->length -= bytes;
237 			sge->offset += bytes;
238 			msg->sg.size -= bytes;
239 			break;
240 		}
241 
242 		msg->sg.size -= sge->length;
243 		bytes -= sge->length;
244 		sk_msg_free_elem(sk, msg, i, charge);
245 		sk_msg_iter_var_next(i);
246 		sk_msg_check_to_free(msg, i, bytes);
247 	}
248 	msg->sg.start = i;
249 }
250 
251 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
252 {
253 	__sk_msg_free_partial(sk, msg, bytes, true);
254 }
255 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
256 
257 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
258 				  u32 bytes)
259 {
260 	__sk_msg_free_partial(sk, msg, bytes, false);
261 }
262 
263 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
264 {
265 	int trim = msg->sg.size - len;
266 	u32 i = msg->sg.end;
267 
268 	if (trim <= 0) {
269 		WARN_ON(trim < 0);
270 		return;
271 	}
272 
273 	sk_msg_iter_var_prev(i);
274 	msg->sg.size = len;
275 	while (msg->sg.data[i].length &&
276 	       trim >= msg->sg.data[i].length) {
277 		trim -= msg->sg.data[i].length;
278 		sk_msg_free_elem(sk, msg, i, true);
279 		sk_msg_iter_var_prev(i);
280 		if (!trim)
281 			goto out;
282 	}
283 
284 	msg->sg.data[i].length -= trim;
285 	sk_mem_uncharge(sk, trim);
286 	/* Adjust copybreak if it falls into the trimmed part of last buf */
287 	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
288 		msg->sg.copybreak = msg->sg.data[i].length;
289 out:
290 	sk_msg_iter_var_next(i);
291 	msg->sg.end = i;
292 
293 	/* If we trim data a full sg elem before the curr pointer, update
294 	 * copybreak and curr so that any future copy operations
295 	 * start at the new copy location.
296 	 * However, trimmed data that has not yet been used in a copy op
297 	 * does not require an update.
298 	 */
299 	if (!msg->sg.size) {
300 		msg->sg.curr = msg->sg.start;
301 		msg->sg.copybreak = 0;
302 	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
303 		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
304 		sk_msg_iter_var_prev(i);
305 		msg->sg.curr = i;
306 		msg->sg.copybreak = msg->sg.data[i].length;
307 	}
308 }
309 EXPORT_SYMBOL_GPL(sk_msg_trim);
310 
311 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
312 			      struct sk_msg *msg, u32 bytes)
313 {
314 	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
315 	const int to_max_pages = MAX_MSG_FRAGS;
316 	struct page *pages[MAX_MSG_FRAGS];
317 	ssize_t orig, copied, use, offset;
318 
319 	orig = msg->sg.size;
320 	while (bytes > 0) {
321 		i = 0;
322 		maxpages = to_max_pages - num_elems;
323 		if (maxpages == 0) {
324 			ret = -EFAULT;
325 			goto out;
326 		}
327 
328 		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
329 					    &offset);
330 		if (copied <= 0) {
331 			ret = -EFAULT;
332 			goto out;
333 		}
334 
335 		bytes -= copied;
336 		msg->sg.size += copied;
337 
338 		while (copied) {
339 			use = min_t(int, copied, PAGE_SIZE - offset);
340 			sg_set_page(&msg->sg.data[msg->sg.end],
341 				    pages[i], use, offset);
342 			sg_unmark_end(&msg->sg.data[msg->sg.end]);
343 			sk_mem_charge(sk, use);
344 
345 			offset = 0;
346 			copied -= use;
347 			sk_msg_iter_next(msg, end);
348 			num_elems++;
349 			i++;
350 		}
351 		/* When zerocopy is mixed with sk_msg_*copy* operations we
352 		 * may have a copybreak set; in this case clear it and prefer
353 		 * the zerocopy remainder when possible.
354 		 */
355 		msg->sg.copybreak = 0;
356 		msg->sg.curr = msg->sg.end;
357 	}
358 out:
359 	/* Revert iov_iter updates; msg will need to use 'trim' later if it
360 	 * also needs to be cleared.
361 	 */
362 	if (ret)
363 		iov_iter_revert(from, msg->sg.size - orig);
364 	return ret;
365 }
366 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
367 
368 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
369 			     struct sk_msg *msg, u32 bytes)
370 {
371 	int ret = -ENOSPC, i = msg->sg.curr;
372 	u32 copy, buf_size, copied = 0;
373 	struct scatterlist *sge;
374 	void *to;
375 
376 	do {
377 		sge = sk_msg_elem(msg, i);
378 		/* This is possible if a trim operation shrunk the buffer */
379 		if (msg->sg.copybreak >= sge->length) {
380 			msg->sg.copybreak = 0;
381 			sk_msg_iter_var_next(i);
382 			if (i == msg->sg.end)
383 				break;
384 			sge = sk_msg_elem(msg, i);
385 		}
386 
387 		buf_size = sge->length - msg->sg.copybreak;
388 		copy = (buf_size > bytes) ? bytes : buf_size;
389 		to = sg_virt(sge) + msg->sg.copybreak;
390 		msg->sg.copybreak += copy;
391 		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
392 			ret = copy_from_iter_nocache(to, copy, from);
393 		else
394 			ret = copy_from_iter(to, copy, from);
395 		if (ret != copy) {
396 			ret = -EFAULT;
397 			goto out;
398 		}
399 		bytes -= copy;
400 		copied += copy;
401 		if (!bytes)
402 			break;
403 		msg->sg.copybreak = 0;
404 		sk_msg_iter_var_next(i);
405 	} while (i != msg->sg.end);
406 out:
407 	msg->sg.curr = i;
408 	return (ret < 0) ? ret : copied;
409 }
410 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
411 
412 /* Receive sk_msg from psock->ingress_msg to @msg. */
413 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
414 		   int len, int flags)
415 {
416 	struct iov_iter *iter = &msg->msg_iter;
417 	int peek = flags & MSG_PEEK;
418 	struct sk_msg *msg_rx;
419 	int i, copied = 0;
420 
421 	msg_rx = sk_psock_peek_msg(psock);
422 	while (copied != len) {
423 		struct scatterlist *sge;
424 
425 		if (unlikely(!msg_rx))
426 			break;
427 
428 		i = msg_rx->sg.start;
429 		do {
430 			struct page *page;
431 			int copy;
432 
433 			sge = sk_msg_elem(msg_rx, i);
434 			copy = sge->length;
435 			page = sg_page(sge);
436 			if (copied + copy > len)
437 				copy = len - copied;
438 			if (copy)
439 				copy = copy_page_to_iter(page, sge->offset, copy, iter);
440 			if (!copy) {
441 				copied = copied ? copied : -EFAULT;
442 				goto out;
443 			}
444 
445 			copied += copy;
446 			if (likely(!peek)) {
447 				sge->offset += copy;
448 				sge->length -= copy;
449 				if (!msg_rx->skb) {
450 					sk_mem_uncharge(sk, copy);
451 					atomic_sub(copy, &sk->sk_rmem_alloc);
452 				}
453 				msg_rx->sg.size -= copy;
454 
455 				if (!sge->length) {
456 					sk_msg_iter_var_next(i);
457 					if (!msg_rx->skb)
458 						put_page(page);
459 				}
460 			} else {
461 				/* Let's not optimize the peek case: if copy_page_to_iter
462 				 * didn't copy the entire length, just break.
463 				 */
464 				if (copy != sge->length)
465 					goto out;
466 				sk_msg_iter_var_next(i);
467 			}
468 
469 			if (copied == len)
470 				break;
471 		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
472 
473 		if (unlikely(peek)) {
474 			msg_rx = sk_psock_next_msg(psock, msg_rx);
475 			if (!msg_rx)
476 				break;
477 			continue;
478 		}
479 
480 		msg_rx->sg.start = i;
481 		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
482 			msg_rx = sk_psock_dequeue_msg(psock);
483 			kfree_sk_msg(msg_rx);
484 		}
485 		msg_rx = sk_psock_peek_msg(psock);
486 	}
487 out:
488 	return copied;
489 }
490 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
491 
492 bool sk_msg_is_readable(struct sock *sk)
493 {
494 	struct sk_psock *psock;
495 	bool empty = true;
496 
497 	rcu_read_lock();
498 	psock = sk_psock(sk);
499 	if (likely(psock))
500 		empty = list_empty(&psock->ingress_msg);
501 	rcu_read_unlock();
502 	return !empty;
503 }
504 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
505 
506 static struct sk_msg *alloc_sk_msg(gfp_t gfp)
507 {
508 	struct sk_msg *msg;
509 
510 	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
511 	if (unlikely(!msg))
512 		return NULL;
513 	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
514 	return msg;
515 }
516 
517 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
518 						  struct sk_buff *skb)
519 {
520 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
521 		return NULL;
522 
523 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
524 		return NULL;
525 
526 	return alloc_sk_msg(GFP_KERNEL);
527 }
528 
529 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
530 					u32 off, u32 len,
531 					struct sk_psock *psock,
532 					struct sock *sk,
533 					struct sk_msg *msg)
534 {
535 	int num_sge, copied;
536 
537 	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
538 	if (num_sge < 0) {
539 		/* skb linearize may fail with ENOMEM, but let's simply try again
540 		 * later if this happens. Under memory pressure we don't want to
541 		 * drop the skb. We need to linearize the skb so that the mapping
542 		 * in skb_to_sgvec cannot error.
543 		 */
544 		if (skb_linearize(skb))
545 			return -EAGAIN;
546 
547 		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
548 		if (unlikely(num_sge < 0))
549 			return num_sge;
550 	}
551 
552 	copied = len;
553 	msg->sg.start = 0;
554 	msg->sg.size = copied;
555 	msg->sg.end = num_sge;
556 	msg->skb = skb;
557 
558 	sk_psock_queue_msg(psock, msg);
559 	sk_psock_data_ready(sk, psock);
560 	return copied;
561 }
562 
563 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
564 				     u32 off, u32 len);
565 
566 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
567 				u32 off, u32 len)
568 {
569 	struct sock *sk = psock->sk;
570 	struct sk_msg *msg;
571 	int err;
572 
573 	/* If we are receiving on the same sock, skb->sk is already assigned;
574 	 * skip memory accounting and the owner transition since they are
575 	 * already set correctly.
576 	 */
577 	if (unlikely(skb->sk == sk))
578 		return sk_psock_skb_ingress_self(psock, skb, off, len);
579 	msg = sk_psock_create_ingress_msg(sk, skb);
580 	if (!msg)
581 		return -EAGAIN;
582 
583 	/* This will transition ownership of the data from the socket where
584 	 * the BPF program was run initiating the redirect to the socket
585 	 * we will eventually receive this data on. The data will be released
586 	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been copied
587 	 * into user buffers.
588 	 */
589 	skb_set_owner_r(skb, sk);
590 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
591 	if (err < 0)
592 		kfree(msg);
593 	return err;
594 }
595 
596 /* Puts an skb on the ingress queue of the socket already assigned to the
597  * skb. In this case we do not need to check memory limits or skb_set_owner_r
598  * because the skb is already accounted for here.
599  */
600 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
601 				     u32 off, u32 len)
602 {
603 	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
604 	struct sock *sk = psock->sk;
605 	int err;
606 
607 	if (unlikely(!msg))
608 		return -EAGAIN;
609 	skb_set_owner_r(skb, sk);
610 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
611 	if (err < 0)
612 		kfree(msg);
613 	return err;
614 }
615 
616 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
617 			       u32 off, u32 len, bool ingress)
618 {
619 	int err = 0;
620 
621 	if (!ingress) {
622 		if (!sock_writeable(psock->sk))
623 			return -EAGAIN;
624 		return skb_send_sock(psock->sk, skb, off, len);
625 	}
626 	skb_get(skb);
627 	err = sk_psock_skb_ingress(psock, skb, off, len);
628 	if (err < 0)
629 		kfree_skb(skb);
630 	return err;
631 }
632 
633 static void sk_psock_skb_state(struct sk_psock *psock,
634 			       struct sk_psock_work_state *state,
635 			       int len, int off)
636 {
637 	spin_lock_bh(&psock->ingress_lock);
638 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
639 		state->len = len;
640 		state->off = off;
641 	}
642 	spin_unlock_bh(&psock->ingress_lock);
643 }
644 
645 static void sk_psock_backlog(struct work_struct *work)
646 {
647 	struct delayed_work *dwork = to_delayed_work(work);
648 	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
649 	struct sk_psock_work_state *state = &psock->work_state;
650 	struct sk_buff *skb = NULL;
651 	u32 len = 0, off = 0;
652 	bool ingress;
653 	int ret;
654 
655 	mutex_lock(&psock->work_mutex);
656 	if (unlikely(state->len)) {
657 		len = state->len;
658 		off = state->off;
659 	}
660 
661 	while ((skb = skb_peek(&psock->ingress_skb))) {
662 		len = skb->len;
663 		off = 0;
664 		if (skb_bpf_strparser(skb)) {
665 			struct strp_msg *stm = strp_msg(skb);
666 
667 			off = stm->offset;
668 			len = stm->full_len;
669 		}
670 		ingress = skb_bpf_ingress(skb);
671 		skb_bpf_redirect_clear(skb);
672 		do {
673 			ret = -EIO;
674 			if (!sock_flag(psock->sk, SOCK_DEAD))
675 				ret = sk_psock_handle_skb(psock, skb, off,
676 							  len, ingress);
677 			if (ret <= 0) {
678 				if (ret == -EAGAIN) {
679 					sk_psock_skb_state(psock, state, len, off);
680 
681 					/* Delay slightly to prioritize any
682 					 * other work that might be here.
683 					 */
684 					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
685 						schedule_delayed_work(&psock->work, 1);
686 					goto end;
687 				}
688 				/* Hard errors break pipe and stop xmit. */
689 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
690 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
691 				goto end;
692 			}
693 			off += ret;
694 			len -= ret;
695 		} while (len);
696 
697 		skb = skb_dequeue(&psock->ingress_skb);
698 		kfree_skb(skb);
699 	}
700 end:
701 	mutex_unlock(&psock->work_mutex);
702 }
703 
704 struct sk_psock *sk_psock_init(struct sock *sk, int node)
705 {
706 	struct sk_psock *psock;
707 	struct proto *prot;
708 
709 	write_lock_bh(&sk->sk_callback_lock);
710 
711 	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
712 		psock = ERR_PTR(-EINVAL);
713 		goto out;
714 	}
715 
716 	if (sk->sk_user_data) {
717 		psock = ERR_PTR(-EBUSY);
718 		goto out;
719 	}
720 
721 	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
722 	if (!psock) {
723 		psock = ERR_PTR(-ENOMEM);
724 		goto out;
725 	}
726 
727 	prot = READ_ONCE(sk->sk_prot);
728 	psock->sk = sk;
729 	psock->eval = __SK_NONE;
730 	psock->sk_proto = prot;
731 	psock->saved_unhash = prot->unhash;
732 	psock->saved_destroy = prot->destroy;
733 	psock->saved_close = prot->close;
734 	psock->saved_write_space = sk->sk_write_space;
735 
736 	INIT_LIST_HEAD(&psock->link);
737 	spin_lock_init(&psock->link_lock);
738 
739 	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
740 	mutex_init(&psock->work_mutex);
741 	INIT_LIST_HEAD(&psock->ingress_msg);
742 	spin_lock_init(&psock->ingress_lock);
743 	skb_queue_head_init(&psock->ingress_skb);
744 
745 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
746 	refcount_set(&psock->refcnt, 1);
747 
748 	__rcu_assign_sk_user_data_with_flags(sk, psock,
749 					     SK_USER_DATA_NOCOPY |
750 					     SK_USER_DATA_PSOCK);
751 	sock_hold(sk);
752 
753 out:
754 	write_unlock_bh(&sk->sk_callback_lock);
755 	return psock;
756 }
757 EXPORT_SYMBOL_GPL(sk_psock_init);
758 
759 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
760 {
761 	struct sk_psock_link *link;
762 
763 	spin_lock_bh(&psock->link_lock);
764 	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
765 					list);
766 	if (link)
767 		list_del(&link->list);
768 	spin_unlock_bh(&psock->link_lock);
769 	return link;
770 }
771 
772 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
773 {
774 	struct sk_msg *msg, *tmp;
775 
776 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
777 		list_del(&msg->list);
778 		if (!msg->skb)
779 			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
780 		sk_msg_free(psock->sk, msg);
781 		kfree(msg);
782 	}
783 }
784 
785 static void __sk_psock_zap_ingress(struct sk_psock *psock)
786 {
787 	struct sk_buff *skb;
788 
789 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
790 		skb_bpf_redirect_clear(skb);
791 		sock_drop(psock->sk, skb);
792 	}
793 	__sk_psock_purge_ingress_msg(psock);
794 }
795 
796 static void sk_psock_link_destroy(struct sk_psock *psock)
797 {
798 	struct sk_psock_link *link, *tmp;
799 
800 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
801 		list_del(&link->list);
802 		sk_psock_free_link(link);
803 	}
804 }
805 
806 void sk_psock_stop(struct sk_psock *psock)
807 {
808 	spin_lock_bh(&psock->ingress_lock);
809 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
810 	sk_psock_cork_free(psock);
811 	spin_unlock_bh(&psock->ingress_lock);
812 }
813 
814 static void sk_psock_done_strp(struct sk_psock *psock);
815 
816 static void sk_psock_destroy(struct work_struct *work)
817 {
818 	struct sk_psock *psock = container_of(to_rcu_work(work),
819 					      struct sk_psock, rwork);
820 	/* No sk_callback_lock since already detached. */
821 
822 	sk_psock_done_strp(psock);
823 
824 	cancel_delayed_work_sync(&psock->work);
825 	__sk_psock_zap_ingress(psock);
826 	mutex_destroy(&psock->work_mutex);
827 
828 	psock_progs_drop(&psock->progs);
829 
830 	sk_psock_link_destroy(psock);
831 	sk_psock_cork_free(psock);
832 
833 	if (psock->sk_redir)
834 		sock_put(psock->sk_redir);
835 	if (psock->sk_pair)
836 		sock_put(psock->sk_pair);
837 	sock_put(psock->sk);
838 	kfree(psock);
839 }
840 
841 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
842 {
843 	write_lock_bh(&sk->sk_callback_lock);
844 	sk_psock_restore_proto(sk, psock);
845 	rcu_assign_sk_user_data(sk, NULL);
846 	if (psock->progs.stream_parser)
847 		sk_psock_stop_strp(sk, psock);
848 	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
849 		sk_psock_stop_verdict(sk, psock);
850 	write_unlock_bh(&sk->sk_callback_lock);
851 
852 	sk_psock_stop(psock);
853 
854 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
855 	queue_rcu_work(system_wq, &psock->rwork);
856 }
857 EXPORT_SYMBOL_GPL(sk_psock_drop);
858 
859 static int sk_psock_map_verd(int verdict, bool redir)
860 {
861 	switch (verdict) {
862 	case SK_PASS:
863 		return redir ? __SK_REDIRECT : __SK_PASS;
864 	case SK_DROP:
865 	default:
866 		break;
867 	}
868 
869 	return __SK_DROP;
870 }
871 
872 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
873 			 struct sk_msg *msg)
874 {
875 	struct bpf_prog *prog;
876 	int ret;
877 
878 	rcu_read_lock();
879 	prog = READ_ONCE(psock->progs.msg_parser);
880 	if (unlikely(!prog)) {
881 		ret = __SK_PASS;
882 		goto out;
883 	}
884 
885 	sk_msg_compute_data_pointers(msg);
886 	msg->sk = sk;
887 	ret = bpf_prog_run_pin_on_cpu(prog, msg);
888 	ret = sk_psock_map_verd(ret, msg->sk_redir);
889 	psock->apply_bytes = msg->apply_bytes;
890 	if (ret == __SK_REDIRECT) {
891 		if (psock->sk_redir) {
892 			sock_put(psock->sk_redir);
893 			psock->sk_redir = NULL;
894 		}
895 		if (!msg->sk_redir) {
896 			ret = __SK_DROP;
897 			goto out;
898 		}
899 		psock->redir_ingress = sk_msg_to_ingress(msg);
900 		psock->sk_redir = msg->sk_redir;
901 		sock_hold(psock->sk_redir);
902 	}
903 out:
904 	rcu_read_unlock();
905 	return ret;
906 }
907 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
908 
909 static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
910 {
911 	struct sk_psock *psock_other;
912 	struct sock *sk_other;
913 
914 	sk_other = skb_bpf_redirect_fetch(skb);
915 	/* This error indicates a buggy BPF program: it returned a redirect
916 	 * return code, but then didn't set a redirect interface.
917 	 */
918 	if (unlikely(!sk_other)) {
919 		skb_bpf_redirect_clear(skb);
920 		sock_drop(from->sk, skb);
921 		return -EIO;
922 	}
923 	psock_other = sk_psock(sk_other);
924 	/* This error indicates the socket is being torn down or had another
925 	 * error that caused the pipe to break. We can't send a packet on
926 	 * a socket that is in this state so we drop the skb.
927 	 */
928 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
929 		skb_bpf_redirect_clear(skb);
930 		sock_drop(from->sk, skb);
931 		return -EIO;
932 	}
933 	spin_lock_bh(&psock_other->ingress_lock);
934 	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
935 		spin_unlock_bh(&psock_other->ingress_lock);
936 		skb_bpf_redirect_clear(skb);
937 		sock_drop(from->sk, skb);
938 		return -EIO;
939 	}
940 
941 	skb_queue_tail(&psock_other->ingress_skb, skb);
942 	schedule_delayed_work(&psock_other->work, 0);
943 	spin_unlock_bh(&psock_other->ingress_lock);
944 	return 0;
945 }
946 
947 static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
948 				       struct sk_psock *from, int verdict)
949 {
950 	switch (verdict) {
951 	case __SK_REDIRECT:
952 		sk_psock_skb_redirect(from, skb);
953 		break;
954 	case __SK_PASS:
955 	case __SK_DROP:
956 	default:
957 		break;
958 	}
959 }
960 
961 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
962 {
963 	struct bpf_prog *prog;
964 	int ret = __SK_PASS;
965 
966 	rcu_read_lock();
967 	prog = READ_ONCE(psock->progs.stream_verdict);
968 	if (likely(prog)) {
969 		skb->sk = psock->sk;
970 		skb_dst_drop(skb);
971 		skb_bpf_redirect_clear(skb);
972 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
973 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
974 		skb->sk = NULL;
975 	}
976 	sk_psock_tls_verdict_apply(skb, psock, ret);
977 	rcu_read_unlock();
978 	return ret;
979 }
980 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
981 
982 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
983 				  int verdict)
984 {
985 	struct sock *sk_other;
986 	int err = 0;
987 	u32 len, off;
988 
989 	switch (verdict) {
990 	case __SK_PASS:
991 		err = -EIO;
992 		sk_other = psock->sk;
993 		if (sock_flag(sk_other, SOCK_DEAD) ||
994 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
995 			goto out_free;
996 
997 		skb_bpf_set_ingress(skb);
998 
999 		/* If the queue is empty then we can submit directly
1000 		 * into the msg queue. If it's not empty we have to
1001 		 * queue work, otherwise we may get OOO data. Any
1002 		 * errors from sk_psock_skb_ingress will be handled by
1003 		 * retrying later from the workqueue.
1004 		 */
1005 		if (skb_queue_empty(&psock->ingress_skb)) {
1006 			len = skb->len;
1007 			off = 0;
1008 			if (skb_bpf_strparser(skb)) {
1009 				struct strp_msg *stm = strp_msg(skb);
1010 
1011 				off = stm->offset;
1012 				len = stm->full_len;
1013 			}
1014 			err = sk_psock_skb_ingress_self(psock, skb, off, len);
1015 		}
1016 		if (err < 0) {
1017 			spin_lock_bh(&psock->ingress_lock);
1018 			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1019 				skb_queue_tail(&psock->ingress_skb, skb);
1020 				schedule_delayed_work(&psock->work, 0);
1021 				err = 0;
1022 			}
1023 			spin_unlock_bh(&psock->ingress_lock);
1024 			if (err < 0)
1025 				goto out_free;
1026 		}
1027 		break;
1028 	case __SK_REDIRECT:
1029 		tcp_eat_skb(psock->sk, skb);
1030 		err = sk_psock_skb_redirect(psock, skb);
1031 		break;
1032 	case __SK_DROP:
1033 	default:
1034 out_free:
1035 		skb_bpf_redirect_clear(skb);
1036 		tcp_eat_skb(psock->sk, skb);
1037 		sock_drop(psock->sk, skb);
1038 	}
1039 
1040 	return err;
1041 }
1042 
1043 static void sk_psock_write_space(struct sock *sk)
1044 {
1045 	struct sk_psock *psock;
1046 	void (*write_space)(struct sock *sk) = NULL;
1047 
1048 	rcu_read_lock();
1049 	psock = sk_psock(sk);
1050 	if (likely(psock)) {
1051 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1052 			schedule_delayed_work(&psock->work, 0);
1053 		write_space = psock->saved_write_space;
1054 	}
1055 	rcu_read_unlock();
1056 	if (write_space)
1057 		write_space(sk);
1058 }
1059 
1060 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1061 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1062 {
1063 	struct sk_psock *psock;
1064 	struct bpf_prog *prog;
1065 	int ret = __SK_DROP;
1066 	struct sock *sk;
1067 
1068 	rcu_read_lock();
1069 	sk = strp->sk;
1070 	psock = sk_psock(sk);
1071 	if (unlikely(!psock)) {
1072 		sock_drop(sk, skb);
1073 		goto out;
1074 	}
1075 	prog = READ_ONCE(psock->progs.stream_verdict);
1076 	if (likely(prog)) {
1077 		skb->sk = sk;
1078 		skb_dst_drop(skb);
1079 		skb_bpf_redirect_clear(skb);
1080 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1081 		skb_bpf_set_strparser(skb);
1082 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1083 		skb->sk = NULL;
1084 	}
1085 	sk_psock_verdict_apply(psock, skb, ret);
1086 out:
1087 	rcu_read_unlock();
1088 }
1089 
1090 static int sk_psock_strp_read_done(struct strparser *strp, int err)
1091 {
1092 	return err;
1093 }
1094 
1095 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1096 {
1097 	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1098 	struct bpf_prog *prog;
1099 	int ret = skb->len;
1100 
1101 	rcu_read_lock();
1102 	prog = READ_ONCE(psock->progs.stream_parser);
1103 	if (likely(prog)) {
1104 		skb->sk = psock->sk;
1105 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1106 		skb->sk = NULL;
1107 	}
1108 	rcu_read_unlock();
1109 	return ret;
1110 }
1111 
1112 /* Called with socket lock held. */
1113 static void sk_psock_strp_data_ready(struct sock *sk)
1114 {
1115 	struct sk_psock *psock;
1116 
1117 	trace_sk_data_ready(sk);
1118 
1119 	rcu_read_lock();
1120 	psock = sk_psock(sk);
1121 	if (likely(psock)) {
1122 		if (tls_sw_has_ctx_rx(sk)) {
1123 			psock->saved_data_ready(sk);
1124 		} else {
1125 			read_lock_bh(&sk->sk_callback_lock);
1126 			strp_data_ready(&psock->strp);
1127 			read_unlock_bh(&sk->sk_callback_lock);
1128 		}
1129 	}
1130 	rcu_read_unlock();
1131 }
1132 
1133 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1134 {
1135 	int ret;
1136 
1137 	static const struct strp_callbacks cb = {
1138 		.rcv_msg	= sk_psock_strp_read,
1139 		.read_sock_done	= sk_psock_strp_read_done,
1140 		.parse_msg	= sk_psock_strp_parse,
1141 	};
1142 
1143 	ret = strp_init(&psock->strp, sk, &cb);
1144 	if (!ret)
1145 		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1146 
1147 	return ret;
1148 }
1149 
1150 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1151 {
1152 	if (psock->saved_data_ready)
1153 		return;
1154 
1155 	psock->saved_data_ready = sk->sk_data_ready;
1156 	sk->sk_data_ready = sk_psock_strp_data_ready;
1157 	sk->sk_write_space = sk_psock_write_space;
1158 }
1159 
1160 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1161 {
1162 	psock_set_prog(&psock->progs.stream_parser, NULL);
1163 
1164 	if (!psock->saved_data_ready)
1165 		return;
1166 
1167 	sk->sk_data_ready = psock->saved_data_ready;
1168 	psock->saved_data_ready = NULL;
1169 	strp_stop(&psock->strp);
1170 }
1171 
1172 static void sk_psock_done_strp(struct sk_psock *psock)
1173 {
1174 	/* Parser has been stopped */
1175 	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1176 		strp_done(&psock->strp);
1177 }
1178 #else
1179 static void sk_psock_done_strp(struct sk_psock *psock)
1180 {
1181 }
1182 #endif /* CONFIG_BPF_STREAM_PARSER */
1183 
1184 static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1185 {
1186 	struct sk_psock *psock;
1187 	struct bpf_prog *prog;
1188 	int ret = __SK_DROP;
1189 	int len = skb->len;
1190 
1191 	rcu_read_lock();
1192 	psock = sk_psock(sk);
1193 	if (unlikely(!psock)) {
1194 		len = 0;
1195 		tcp_eat_skb(sk, skb);
1196 		sock_drop(sk, skb);
1197 		goto out;
1198 	}
1199 	prog = READ_ONCE(psock->progs.stream_verdict);
1200 	if (!prog)
1201 		prog = READ_ONCE(psock->progs.skb_verdict);
1202 	if (likely(prog)) {
1203 		skb_dst_drop(skb);
1204 		skb_bpf_redirect_clear(skb);
1205 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1206 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1207 	}
1208 	ret = sk_psock_verdict_apply(psock, skb, ret);
1209 	if (ret < 0)
1210 		len = ret;
1211 out:
1212 	rcu_read_unlock();
1213 	return len;
1214 }
1215 
1216 static void sk_psock_verdict_data_ready(struct sock *sk)
1217 {
1218 	struct socket *sock = sk->sk_socket;
1219 	const struct proto_ops *ops;
1220 	int copied;
1221 
1222 	trace_sk_data_ready(sk);
1223 
1224 	if (unlikely(!sock))
1225 		return;
1226 	ops = READ_ONCE(sock->ops);
1227 	if (!ops || !ops->read_skb)
1228 		return;
1229 	copied = ops->read_skb(sk, sk_psock_verdict_recv);
1230 	if (copied >= 0) {
1231 		struct sk_psock *psock;
1232 
1233 		rcu_read_lock();
1234 		psock = sk_psock(sk);
1235 		if (psock)
1236 			sk_psock_data_ready(sk, psock);
1237 		rcu_read_unlock();
1238 	}
1239 }
1240 
1241 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1242 {
1243 	if (psock->saved_data_ready)
1244 		return;
1245 
1246 	psock->saved_data_ready = sk->sk_data_ready;
1247 	sk->sk_data_ready = sk_psock_verdict_data_ready;
1248 	sk->sk_write_space = sk_psock_write_space;
1249 }
1250 
1251 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1252 {
1253 	psock_set_prog(&psock->progs.stream_verdict, NULL);
1254 	psock_set_prog(&psock->progs.skb_verdict, NULL);
1255 
1256 	if (!psock->saved_data_ready)
1257 		return;
1258 
1259 	sk->sk_data_ready = psock->saved_data_ready;
1260 	psock->saved_data_ready = NULL;
1261 }
1262