xref: /linux/net/core/skmsg.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3 
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7 
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11 #include <trace/events/sock.h>
12 
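/* Check whether the tail element of the sg ring may be coalesced with newly
 * allocated data, based on whether elem_first_coalesce falls within the
 * occupied portion of the ring (handles both the wrapped and non-wrapped
 * end/start cases).
 */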
13 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
14 {
15 	if (msg->sg.end > msg->sg.start &&
16 	    elem_first_coalesce < msg->sg.end)
17 		return true;
18 
19 	if (msg->sg.end < msg->sg.start &&
20 	    (elem_first_coalesce > msg->sg.start ||
21 	     elem_first_coalesce < msg->sg.end))
22 		return true;
23 
24 	return false;
25 }
26 
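/* Grow @msg so that it holds @len bytes, allocating from the socket's
 * page_frag. New space is coalesced into the tail element when
 * sk_msg_try_coalesce_ok() allows it, otherwise a fresh sg element is used.
 * On allocation or memory-accounting failure the msg is trimmed back to its
 * original size and -ENOMEM is returned; -ENOSPC means the ring filled up.
 */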
27 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
28 		 int elem_first_coalesce)
29 {
30 	struct page_frag *pfrag = sk_page_frag(sk);
31 	u32 osize = msg->sg.size;
32 	int ret = 0;
33 
34 	len -= msg->sg.size;
35 	while (len > 0) {
36 		struct scatterlist *sge;
37 		u32 orig_offset;
38 		int use, i;
39 
40 		if (!sk_page_frag_refill(sk, pfrag)) {
41 			ret = -ENOMEM;
42 			goto msg_trim;
43 		}
44 
45 		orig_offset = pfrag->offset;
46 		use = min_t(int, len, pfrag->size - orig_offset);
47 		if (!sk_wmem_schedule(sk, use)) {
48 			ret = -ENOMEM;
49 			goto msg_trim;
50 		}
51 
52 		i = msg->sg.end;
53 		sk_msg_iter_var_prev(i);
54 		sge = &msg->sg.data[i];
55 
56 		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
57 		    sg_page(sge) == pfrag->page &&
58 		    sge->offset + sge->length == orig_offset) {
59 			sge->length += use;
60 		} else {
61 			if (sk_msg_full(msg)) {
62 				ret = -ENOSPC;
63 				break;
64 			}
65 
66 			sge = &msg->sg.data[msg->sg.end];
67 			sg_unmark_end(sge);
68 			sg_set_page(sge, pfrag->page, use, orig_offset);
69 			get_page(pfrag->page);
70 			sk_msg_iter_next(msg, end);
71 		}
72 
73 		sk_mem_charge(sk, use);
74 		msg->sg.size += use;
75 		pfrag->offset += use;
76 		len -= use;
77 	}
78 
79 	return ret;
80 
81 msg_trim:
82 	sk_msg_trim(sk, msg, osize);
83 	return ret;
84 }
85 EXPORT_SYMBOL_GPL(sk_msg_alloc);
86 
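/* Clone @len bytes starting at offset @off of @src into @dst. Only sg
 * entries are duplicated, the underlying pages are shared rather than
 * copied, and adjacent ranges are merged into the tail element of @dst when
 * possible. Returns -ENOSPC if @src runs out of data or @dst out of slots.
 */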
87 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
88 		 u32 off, u32 len)
89 {
90 	int i = src->sg.start;
91 	struct scatterlist *sge = sk_msg_elem(src, i);
92 	struct scatterlist *sgd = NULL;
93 	u32 sge_len, sge_off;
94 
95 	while (off) {
96 		if (sge->length > off)
97 			break;
98 		off -= sge->length;
99 		sk_msg_iter_var_next(i);
100 		if (i == src->sg.end && off)
101 			return -ENOSPC;
102 		sge = sk_msg_elem(src, i);
103 	}
104 
105 	while (len) {
106 		sge_len = sge->length - off;
107 		if (sge_len > len)
108 			sge_len = len;
109 
110 		if (dst->sg.end)
111 			sgd = sk_msg_elem(dst, dst->sg.end - 1);
112 
113 		if (sgd &&
114 		    (sg_page(sge) == sg_page(sgd)) &&
115 		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
116 			sgd->length += sge_len;
117 			dst->sg.size += sge_len;
118 		} else if (!sk_msg_full(dst)) {
119 			sge_off = sge->offset + off;
120 			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
121 		} else {
122 			return -ENOSPC;
123 		}
124 
125 		off = 0;
126 		len -= sge_len;
127 		sk_mem_charge(sk, sge_len);
128 		sk_msg_iter_var_next(i);
129 		if (i == src->sg.end && len)
130 			return -ENOSPC;
131 		sge = sk_msg_elem(src, i);
132 	}
133 
134 	return 0;
135 }
136 EXPORT_SYMBOL_GPL(sk_msg_clone);
137 
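/* Uncharge @bytes of memory accounting from the front of @msg, zeroing out
 * fully consumed sg elements and advancing sg.start past them.
 */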
138 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
139 {
140 	int i = msg->sg.start;
141 
142 	do {
143 		struct scatterlist *sge = sk_msg_elem(msg, i);
144 
145 		if (bytes < sge->length) {
146 			sge->length -= bytes;
147 			sge->offset += bytes;
148 			sk_mem_uncharge(sk, bytes);
149 			break;
150 		}
151 
152 		sk_mem_uncharge(sk, sge->length);
153 		bytes -= sge->length;
154 		sge->length = 0;
155 		sge->offset = 0;
156 		sk_msg_iter_var_next(i);
157 	} while (bytes && i != msg->sg.end);
158 	msg->sg.start = i;
159 }
160 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
161 
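/* Uncharge up to @bytes of memory accounting across the elements of @msg
 * without modifying the scatterlist itself.
 */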
162 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
163 {
164 	int i = msg->sg.start;
165 
166 	do {
167 		struct scatterlist *sge = &msg->sg.data[i];
168 		int uncharge = (bytes < sge->length) ? bytes : sge->length;
169 
170 		sk_mem_uncharge(sk, uncharge);
171 		bytes -= uncharge;
172 		sk_msg_iter_var_next(i);
173 	} while (i != msg->sg.end);
174 }
175 EXPORT_SYMBOL_GPL(sk_msg_return);
176 
177 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
178 			    bool charge)
179 {
180 	struct scatterlist *sge = sk_msg_elem(msg, i);
181 	u32 len = sge->length;
182 
183 	/* When the skb owns the memory we free it from consume_skb path. */
184 	if (!msg->skb) {
185 		if (charge)
186 			sk_mem_uncharge(sk, len);
187 		put_page(sg_page(sge));
188 	}
189 	memset(sge, 0, sizeof(*sge));
190 	return len;
191 }
192 
193 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
194 			 bool charge)
195 {
196 	struct scatterlist *sge = sk_msg_elem(msg, i);
197 	int freed = 0;
198 
199 	while (msg->sg.size) {
200 		msg->sg.size -= sge->length;
201 		freed += sk_msg_free_elem(sk, msg, i, charge);
202 		sk_msg_iter_var_next(i);
203 		sk_msg_check_to_free(msg, i, msg->sg.size);
204 		sge = sk_msg_elem(msg, i);
205 	}
206 	consume_skb(msg->skb);
207 	sk_msg_init(msg);
208 	return freed;
209 }
210 
211 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
212 {
213 	return __sk_msg_free(sk, msg, msg->sg.start, false);
214 }
215 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
216 
217 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
218 {
219 	return __sk_msg_free(sk, msg, msg->sg.start, true);
220 }
221 EXPORT_SYMBOL_GPL(sk_msg_free);
222 
223 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
224 				  u32 bytes, bool charge)
225 {
226 	struct scatterlist *sge;
227 	u32 i = msg->sg.start;
228 
229 	while (bytes) {
230 		sge = sk_msg_elem(msg, i);
231 		if (!sge->length)
232 			break;
233 		if (bytes < sge->length) {
234 			if (charge)
235 				sk_mem_uncharge(sk, bytes);
236 			sge->length -= bytes;
237 			sge->offset += bytes;
238 			msg->sg.size -= bytes;
239 			break;
240 		}
241 
242 		msg->sg.size -= sge->length;
243 		bytes -= sge->length;
244 		sk_msg_free_elem(sk, msg, i, charge);
245 		sk_msg_iter_var_next(i);
246 		sk_msg_check_to_free(msg, i, bytes);
247 	}
248 	msg->sg.start = i;
249 }
250 
251 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
252 {
253 	__sk_msg_free_partial(sk, msg, bytes, true);
254 }
255 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
256 
257 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
258 				  u32 bytes)
259 {
260 	__sk_msg_free_partial(sk, msg, bytes, false);
261 }
262 
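/* Trim @msg down to @len bytes from the tail: whole sg elements beyond the
 * new size are freed, a partially trimmed tail element is shortened, and
 * sg.end, sg.curr and copybreak are adjusted accordingly.
 */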
263 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
264 {
265 	int trim = msg->sg.size - len;
266 	u32 i = msg->sg.end;
267 
268 	if (trim <= 0) {
269 		WARN_ON(trim < 0);
270 		return;
271 	}
272 
273 	sk_msg_iter_var_prev(i);
274 	msg->sg.size = len;
275 	while (msg->sg.data[i].length &&
276 	       trim >= msg->sg.data[i].length) {
277 		trim -= msg->sg.data[i].length;
278 		sk_msg_free_elem(sk, msg, i, true);
279 		sk_msg_iter_var_prev(i);
280 		if (!trim)
281 			goto out;
282 	}
283 
284 	msg->sg.data[i].length -= trim;
285 	sk_mem_uncharge(sk, trim);
286 	/* Adjust copybreak if it falls into the trimmed part of last buf */
287 	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
288 		msg->sg.copybreak = msg->sg.data[i].length;
289 out:
290 	sk_msg_iter_var_next(i);
291 	msg->sg.end = i;
292 
293 	/* If we trim data a full sg elem before the curr pointer, update
294 	 * copybreak and curr so that any future copy operations
295 	 * start at the new copy location.
296 	 * However, trimmed data that has not yet been used in a copy op
297 	 * does not require an update.
298 	 */
299 	if (!msg->sg.size) {
300 		msg->sg.curr = msg->sg.start;
301 		msg->sg.copybreak = 0;
302 	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
303 		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
304 		sk_msg_iter_var_prev(i);
305 		msg->sg.curr = i;
306 		msg->sg.copybreak = msg->sg.data[i].length;
307 	}
308 }
309 EXPORT_SYMBOL_GPL(sk_msg_trim);
310 
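/* Zerocopy path: pin up to @bytes of user pages from @from directly into
 * the sg ring of @msg via iov_iter_get_pages2(), charging the socket for
 * each mapped chunk. On error the iov_iter is reverted; the caller must
 * trim @msg itself if the partially mapped data also needs to be dropped.
 */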
311 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
312 			      struct sk_msg *msg, u32 bytes)
313 {
314 	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
315 	const int to_max_pages = MAX_MSG_FRAGS;
316 	struct page *pages[MAX_MSG_FRAGS];
317 	ssize_t orig, copied, use, offset;
318 
319 	orig = msg->sg.size;
320 	while (bytes > 0) {
321 		i = 0;
322 		maxpages = to_max_pages - num_elems;
323 		if (maxpages == 0) {
324 			ret = -EFAULT;
325 			goto out;
326 		}
327 
328 		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
329 					    &offset);
330 		if (copied <= 0) {
331 			ret = -EFAULT;
332 			goto out;
333 		}
334 
335 		bytes -= copied;
336 		msg->sg.size += copied;
337 
338 		while (copied) {
339 			use = min_t(int, copied, PAGE_SIZE - offset);
340 			sg_set_page(&msg->sg.data[msg->sg.end],
341 				    pages[i], use, offset);
342 			sg_unmark_end(&msg->sg.data[msg->sg.end]);
343 			sk_mem_charge(sk, use);
344 
345 			offset = 0;
346 			copied -= use;
347 			sk_msg_iter_next(msg, end);
348 			num_elems++;
349 			i++;
350 		}
351 		/* When zerocopy is mixed with sk_msg_*copy* operations we
352 		 * may have a copybreak set; in this case clear it and prefer
353 		 * the zerocopy remainder when possible.
354 		 */
355 		msg->sg.copybreak = 0;
356 		msg->sg.curr = msg->sg.end;
357 	}
358 out:
359 	/* Revert iov_iter updates; msg will need to use 'trim' later if it
360 	 * also needs to be cleared.
361 	 */
362 	if (ret)
363 		iov_iter_revert(from, msg->sg.size - orig);
364 	return ret;
365 }
366 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
367 
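/* Copy @bytes from @from into the already allocated sg elements of @msg,
 * starting at sg.curr/copybreak. Returns the number of bytes copied or a
 * negative error if the copy from the iterator fails.
 */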
368 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
369 			     struct sk_msg *msg, u32 bytes)
370 {
371 	int ret = -ENOSPC, i = msg->sg.curr;
372 	u32 copy, buf_size, copied = 0;
373 	struct scatterlist *sge;
374 	void *to;
375 
376 	do {
377 		sge = sk_msg_elem(msg, i);
378 		/* This is possible if a trim operation shrunk the buffer */
379 		if (msg->sg.copybreak >= sge->length) {
380 			msg->sg.copybreak = 0;
381 			sk_msg_iter_var_next(i);
382 			if (i == msg->sg.end)
383 				break;
384 			sge = sk_msg_elem(msg, i);
385 		}
386 
387 		buf_size = sge->length - msg->sg.copybreak;
388 		copy = (buf_size > bytes) ? bytes : buf_size;
389 		to = sg_virt(sge) + msg->sg.copybreak;
390 		msg->sg.copybreak += copy;
391 		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
392 			ret = copy_from_iter_nocache(to, copy, from);
393 		else
394 			ret = copy_from_iter(to, copy, from);
395 		if (ret != copy) {
396 			ret = -EFAULT;
397 			goto out;
398 		}
399 		bytes -= copy;
400 		copied += copy;
401 		if (!bytes)
402 			break;
403 		msg->sg.copybreak = 0;
404 		sk_msg_iter_var_next(i);
405 	} while (i != msg->sg.end);
406 out:
407 	msg->sg.curr = i;
408 	return (ret < 0) ? ret : copied;
409 }
410 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
411 
412 /* Receive sk_msg from psock->ingress_msg to @msg. */
413 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
414 		   int len, int flags)
415 {
416 	struct iov_iter *iter = &msg->msg_iter;
417 	int peek = flags & MSG_PEEK;
418 	struct sk_msg *msg_rx;
419 	int i, copied = 0;
420 
421 	msg_rx = sk_psock_peek_msg(psock);
422 	while (copied != len) {
423 		struct scatterlist *sge;
424 
425 		if (unlikely(!msg_rx))
426 			break;
427 
428 		i = msg_rx->sg.start;
429 		do {
430 			struct page *page;
431 			int copy;
432 
433 			sge = sk_msg_elem(msg_rx, i);
434 			copy = sge->length;
435 			page = sg_page(sge);
436 			if (copied + copy > len)
437 				copy = len - copied;
438 			if (copy)
439 				copy = copy_page_to_iter(page, sge->offset, copy, iter);
440 			if (!copy) {
441 				copied = copied ? copied : -EFAULT;
442 				goto out;
443 			}
444 
445 			copied += copy;
446 			if (likely(!peek)) {
447 				sge->offset += copy;
448 				sge->length -= copy;
449 				if (!msg_rx->skb) {
450 					sk_mem_uncharge(sk, copy);
451 					atomic_sub(copy, &sk->sk_rmem_alloc);
452 				}
453 				msg_rx->sg.size -= copy;
454 
455 				if (!sge->length) {
456 					sk_msg_iter_var_next(i);
457 					if (!msg_rx->skb)
458 						put_page(page);
459 				}
460 			} else {
461 				/* Let's not optimize the peek case: if copy_page_to_iter
462 				 * didn't copy the entire length, just break.
463 				 */
464 				if (copy != sge->length)
465 					goto out;
466 				sk_msg_iter_var_next(i);
467 			}
468 
469 			if (copied == len)
470 				break;
471 		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
472 
473 		if (unlikely(peek)) {
474 			msg_rx = sk_psock_next_msg(psock, msg_rx);
475 			if (!msg_rx)
476 				break;
477 			continue;
478 		}
479 
480 		msg_rx->sg.start = i;
481 		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
482 			msg_rx = sk_psock_dequeue_msg(psock);
483 			kfree_sk_msg(msg_rx);
484 		}
485 		msg_rx = sk_psock_peek_msg(psock);
486 	}
487 out:
488 	return copied;
489 }
490 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
491 
492 bool sk_msg_is_readable(struct sock *sk)
493 {
494 	struct sk_psock *psock;
495 	bool empty = true;
496 
497 	rcu_read_lock();
498 	psock = sk_psock(sk);
499 	if (likely(psock))
500 		empty = list_empty(&psock->ingress_msg);
501 	rcu_read_unlock();
502 	return !empty;
503 }
504 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
505 
506 static struct sk_msg *alloc_sk_msg(gfp_t gfp)
507 {
508 	struct sk_msg *msg;
509 
510 	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
511 	if (unlikely(!msg))
512 		return NULL;
513 	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
514 	return msg;
515 }
516 
517 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
518 						  struct sk_buff *skb)
519 {
520 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
521 		return NULL;
522 
523 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
524 		return NULL;
525 
526 	return alloc_sk_msg(GFP_KERNEL);
527 }
528 
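/* Map @len bytes of @skb starting at @off into the sg ring of @msg via
 * skb_to_sgvec(), queue the msg on psock->ingress_msg and wake up the
 * receiving socket. Returns the number of bytes enqueued or a negative
 * error.
 */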
529 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
530 					u32 off, u32 len,
531 					struct sk_psock *psock,
532 					struct sock *sk,
533 					struct sk_msg *msg,
534 					bool take_ref)
535 {
536 	int num_sge, copied;
537 
538 	/* skb_to_sgvec will fail when the total number of fragments in
539 	 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
540 	 * caller may aggregate multiple skbs.
541 	 */
542 	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
543 	if (num_sge < 0) {
544 		/* skb linearize may fail with ENOMEM, but let's simply try again
545 		 * later if this happens. Under memory pressure we don't want to
546 		 * drop the skb. We need to linearize the skb so that the mapping
547 		 * in skb_to_sgvec cannot error.
548 		 * Note that skb_linearize requires the skb not to be shared.
549 		 */
550 		if (skb_linearize(skb))
551 			return -EAGAIN;
552 
553 		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
554 		if (unlikely(num_sge < 0))
555 			return num_sge;
556 	}
557 
558 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
559 	psock->ingress_bytes += len;
560 #endif
561 	copied = len;
562 	msg->sg.start = 0;
563 	msg->sg.size = copied;
564 	msg->sg.end = num_sge;
565 	msg->skb = take_ref ? skb_get(skb) : skb;
566 
567 	sk_psock_queue_msg(psock, msg);
568 	sk_psock_data_ready(sk, psock);
569 	return copied;
570 }
571 
572 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
573 				     u32 off, u32 len, bool take_ref);
574 
575 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
576 				u32 off, u32 len)
577 {
578 	struct sock *sk = psock->sk;
579 	struct sk_msg *msg;
580 	int err;
581 
582 	/* If we are receiving on the same sock skb->sk is already assigned,
583 	 * skip memory accounting and owner transition since it is already set
584 	 * correctly.
585 	 */
586 	if (unlikely(skb->sk == sk))
587 		return sk_psock_skb_ingress_self(psock, skb, off, len, true);
588 	msg = sk_psock_create_ingress_msg(sk, skb);
589 	if (!msg)
590 		return -EAGAIN;
591 
592 	/* This will transition ownership of the data from the socket where
593 	 * the BPF program was run initiating the redirect to the socket
594 	 * we will eventually receive this data on. The data will be released
595 	 * from skb_consume found in __tcp_bpf_recvmsg() after it's been copied
596 	 * into user buffers.
597 	 */
598 	skb_set_owner_r(skb, sk);
599 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
600 	if (err < 0)
601 		kfree(msg);
602 	return err;
603 }
604 
605 /* Puts an skb on the ingress queue of the socket already assigned to the
606  * skb. In this case we do not need to check memory limits or skb_set_owner_r
607  * because the skb is already accounted for here.
608  */
609 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
610 				     u32 off, u32 len, bool take_ref)
611 {
612 	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
613 	struct sock *sk = psock->sk;
614 	int err;
615 
616 	if (unlikely(!msg))
617 		return -EAGAIN;
618 	skb_set_owner_r(skb, sk);
619 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
620 	if (err < 0)
621 		kfree(msg);
622 	return err;
623 }
624 
625 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
626 			       u32 off, u32 len, bool ingress)
627 {
628 	if (!ingress) {
629 		if (!sock_writeable(psock->sk))
630 			return -EAGAIN;
631 		return skb_send_sock(psock->sk, skb, off, len);
632 	}
633 
634 	return sk_psock_skb_ingress(psock, skb, off, len);
635 }
636 
637 static void sk_psock_skb_state(struct sk_psock *psock,
638 			       struct sk_psock_work_state *state,
639 			       int len, int off)
640 {
641 	spin_lock_bh(&psock->ingress_lock);
642 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
643 		state->len = len;
644 		state->off = off;
645 	}
646 	spin_unlock_bh(&psock->ingress_lock);
647 }
648 
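/* Workqueue handler that drains psock->ingress_skb, either re-ingressing
 * each skb locally or transmitting it, depending on the redirect verdict.
 * Partial progress is saved in psock->work_state so an -EAGAIN can be
 * resumed by a rescheduled run.
 */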
649 static void sk_psock_backlog(struct work_struct *work)
650 {
651 	struct delayed_work *dwork = to_delayed_work(work);
652 	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
653 	struct sk_psock_work_state *state = &psock->work_state;
654 	struct sk_buff *skb = NULL;
655 	u32 len = 0, off = 0;
656 	bool ingress;
657 	int ret;
658 
659 	/* If sk is quickly removed from the map and then added back, the old
660 	 * psock should not be scheduled, because there are now two psocks
661 	 * pointing to the same sk.
662 	 */
663 	if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
664 		return;
665 
666 	/* Increment the psock refcnt to synchronize with close(fd) path in
667 	 * sock_map_close(), ensuring we wait for backlog thread completion
668 	 * before sk_socket freed. If refcnt increment fails, it indicates
669 	 * sock_map_close() completed with sk_socket potentially already freed.
670 	 */
671 	if (!sk_psock_get(psock->sk))
672 		return;
673 	mutex_lock(&psock->work_mutex);
674 	while ((skb = skb_peek(&psock->ingress_skb))) {
675 		len = skb->len;
676 		off = 0;
677 		if (skb_bpf_strparser(skb)) {
678 			struct strp_msg *stm = strp_msg(skb);
679 
680 			off = stm->offset;
681 			len = stm->full_len;
682 		}
683 
684 		/* Resume processing from previous partial state */
685 		if (unlikely(state->len)) {
686 			len = state->len;
687 			off = state->off;
688 		}
689 
690 		ingress = skb_bpf_ingress(skb);
691 		skb_bpf_redirect_clear(skb);
692 		do {
693 			ret = -EIO;
694 			if (!sock_flag(psock->sk, SOCK_DEAD))
695 				ret = sk_psock_handle_skb(psock, skb, off,
696 							  len, ingress);
697 			if (ret <= 0) {
698 				if (ret == -EAGAIN) {
699 					sk_psock_skb_state(psock, state, len, off);
700 					/* Restore redir info we cleared before */
701 					skb_bpf_set_redir(skb, psock->sk, ingress);
702 					/* Delay slightly to prioritize any
703 					 * other work that might be here.
704 					 */
705 					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
706 						schedule_delayed_work(&psock->work, 1);
707 					goto end;
708 				}
709 				/* Hard errors break pipe and stop xmit. */
710 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
711 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
712 				goto end;
713 			}
714 			off += ret;
715 			len -= ret;
716 		} while (len);
717 
718 		/* The entire skb sent, clear state */
719 		sk_psock_skb_state(psock, state, 0, 0);
720 		skb = skb_dequeue(&psock->ingress_skb);
721 		kfree_skb(skb);
722 	}
723 end:
724 	mutex_unlock(&psock->work_mutex);
725 	sk_psock_put(psock->sk, psock);
726 }
727 
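/* Allocate a psock and attach it to @sk via sk_user_data, saving the
 * socket's original proto callbacks so they can be restored on drop. Fails
 * with -EINVAL if an inet socket already has a ULP attached and with -EBUSY
 * if sk_user_data is already in use.
 */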
728 struct sk_psock *sk_psock_init(struct sock *sk, int node)
729 {
730 	struct sk_psock *psock;
731 	struct proto *prot;
732 
733 	write_lock_bh(&sk->sk_callback_lock);
734 
735 	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
736 		psock = ERR_PTR(-EINVAL);
737 		goto out;
738 	}
739 
740 	if (sk->sk_user_data) {
741 		psock = ERR_PTR(-EBUSY);
742 		goto out;
743 	}
744 
745 	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
746 	if (!psock) {
747 		psock = ERR_PTR(-ENOMEM);
748 		goto out;
749 	}
750 
751 	prot = READ_ONCE(sk->sk_prot);
752 	psock->sk = sk;
753 	psock->eval = __SK_NONE;
754 	psock->sk_proto = prot;
755 	psock->saved_unhash = prot->unhash;
756 	psock->saved_destroy = prot->destroy;
757 	psock->saved_close = prot->close;
758 	psock->saved_write_space = sk->sk_write_space;
759 
760 	INIT_LIST_HEAD(&psock->link);
761 	spin_lock_init(&psock->link_lock);
762 
763 	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
764 	mutex_init(&psock->work_mutex);
765 	INIT_LIST_HEAD(&psock->ingress_msg);
766 	spin_lock_init(&psock->ingress_lock);
767 	skb_queue_head_init(&psock->ingress_skb);
768 
769 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
770 	refcount_set(&psock->refcnt, 1);
771 
772 	__rcu_assign_sk_user_data_with_flags(sk, psock,
773 					     SK_USER_DATA_NOCOPY |
774 					     SK_USER_DATA_PSOCK);
775 	sock_hold(sk);
776 
777 out:
778 	write_unlock_bh(&sk->sk_callback_lock);
779 	return psock;
780 }
781 EXPORT_SYMBOL_GPL(sk_psock_init);
782 
783 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
784 {
785 	struct sk_psock_link *link;
786 
787 	spin_lock_bh(&psock->link_lock);
788 	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
789 					list);
790 	if (link)
791 		list_del(&link->list);
792 	spin_unlock_bh(&psock->link_lock);
793 	return link;
794 }
795 
796 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
797 {
798 	struct sk_msg *msg, *tmp;
799 
800 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
801 		list_del(&msg->list);
802 		if (!msg->skb)
803 			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
804 		sk_msg_free(psock->sk, msg);
805 		kfree(msg);
806 	}
807 }
808 
809 static void __sk_psock_zap_ingress(struct sk_psock *psock)
810 {
811 	struct sk_buff *skb;
812 
813 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
814 		skb_bpf_redirect_clear(skb);
815 		sock_drop(psock->sk, skb);
816 	}
817 	__sk_psock_purge_ingress_msg(psock);
818 }
819 
820 static void sk_psock_link_destroy(struct sk_psock *psock)
821 {
822 	struct sk_psock_link *link, *tmp;
823 
824 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
825 		list_del(&link->list);
826 		sk_psock_free_link(link);
827 	}
828 }
829 
830 void sk_psock_stop(struct sk_psock *psock)
831 {
832 	spin_lock_bh(&psock->ingress_lock);
833 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
834 	sk_psock_cork_free(psock);
835 	spin_unlock_bh(&psock->ingress_lock);
836 }
837 
838 static void sk_psock_done_strp(struct sk_psock *psock);
839 
840 static void sk_psock_destroy(struct work_struct *work)
841 {
842 	struct sk_psock *psock = container_of(to_rcu_work(work),
843 					      struct sk_psock, rwork);
844 	/* No sk_callback_lock since already detached. */
845 
846 	sk_psock_done_strp(psock);
847 
848 	cancel_delayed_work_sync(&psock->work);
849 	__sk_psock_zap_ingress(psock);
850 	mutex_destroy(&psock->work_mutex);
851 
852 	psock_progs_drop(&psock->progs);
853 
854 	sk_psock_link_destroy(psock);
855 	sk_psock_cork_free(psock);
856 
857 	if (psock->sk_redir)
858 		sock_put(psock->sk_redir);
859 	if (psock->sk_pair)
860 		sock_put(psock->sk_pair);
861 	sock_put(psock->sk);
862 	kfree(psock);
863 }
864 
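/* Detach the psock from @sk: restore the original proto and data_ready
 * callbacks, clear sk_user_data and defer the final teardown to
 * sk_psock_destroy() via RCU work.
 */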
865 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
866 {
867 	write_lock_bh(&sk->sk_callback_lock);
868 	sk_psock_restore_proto(sk, psock);
869 	rcu_assign_sk_user_data(sk, NULL);
870 	if (psock->progs.stream_parser)
871 		sk_psock_stop_strp(sk, psock);
872 	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
873 		sk_psock_stop_verdict(sk, psock);
874 	write_unlock_bh(&sk->sk_callback_lock);
875 
876 	sk_psock_stop(psock);
877 
878 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
879 	queue_rcu_work(system_wq, &psock->rwork);
880 }
881 EXPORT_SYMBOL_GPL(sk_psock_drop);
882 
883 static int sk_psock_map_verd(int verdict, bool redir)
884 {
885 	switch (verdict) {
886 	case SK_PASS:
887 		return redir ? __SK_REDIRECT : __SK_PASS;
888 	case SK_DROP:
889 	default:
890 		break;
891 	}
892 
893 	return __SK_DROP;
894 }
895 
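/* Run the attached msg_parser BPF program on @msg and map its verdict to
 * __SK_PASS/__SK_REDIRECT/__SK_DROP. For a redirect verdict the target
 * socket is recorded in psock->sk_redir with a reference held.
 */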
896 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
897 			 struct sk_msg *msg)
898 {
899 	struct bpf_prog *prog;
900 	int ret;
901 
902 	rcu_read_lock();
903 	prog = READ_ONCE(psock->progs.msg_parser);
904 	if (unlikely(!prog)) {
905 		ret = __SK_PASS;
906 		goto out;
907 	}
908 
909 	sk_msg_compute_data_pointers(msg);
910 	msg->sk = sk;
911 	ret = bpf_prog_run_pin_on_cpu(prog, msg);
912 	ret = sk_psock_map_verd(ret, msg->sk_redir);
913 	psock->apply_bytes = msg->apply_bytes;
914 	if (ret == __SK_REDIRECT) {
915 		if (psock->sk_redir) {
916 			sock_put(psock->sk_redir);
917 			psock->sk_redir = NULL;
918 		}
919 		if (!msg->sk_redir) {
920 			ret = __SK_DROP;
921 			goto out;
922 		}
923 		psock->redir_ingress = sk_msg_to_ingress(msg);
924 		psock->sk_redir = msg->sk_redir;
925 		sock_hold(psock->sk_redir);
926 	}
927 out:
928 	rcu_read_unlock();
929 	return ret;
930 }
931 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
932 
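/* Forward @skb to the socket selected by the BPF redirect verdict by
 * queueing it on the target psock's ingress_skb list and scheduling its
 * backlog work. The skb is dropped if no valid target exists or the target
 * is being torn down.
 */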
933 static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
934 {
935 	struct sk_psock *psock_other;
936 	struct sock *sk_other;
937 
938 	sk_other = skb_bpf_redirect_fetch(skb);
939 	/* This error indicates a buggy BPF program: it returned a redirect
940 	 * return code, but then didn't set a redirect interface.
941 	 */
942 	if (unlikely(!sk_other)) {
943 		skb_bpf_redirect_clear(skb);
944 		sock_drop(from->sk, skb);
945 		return -EIO;
946 	}
947 	psock_other = sk_psock(sk_other);
948 	/* This error indicates the socket is being torn down or had another
949 	 * error that caused the pipe to break. We can't send a packet on
950 	 * a socket that is in this state so we drop the skb.
951 	 */
952 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
953 		skb_bpf_redirect_clear(skb);
954 		sock_drop(from->sk, skb);
955 		return -EIO;
956 	}
957 	spin_lock_bh(&psock_other->ingress_lock);
958 	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
959 		spin_unlock_bh(&psock_other->ingress_lock);
960 		skb_bpf_redirect_clear(skb);
961 		sock_drop(from->sk, skb);
962 		return -EIO;
963 	}
964 
965 	skb_queue_tail(&psock_other->ingress_skb, skb);
966 	schedule_delayed_work(&psock_other->work, 0);
967 	spin_unlock_bh(&psock_other->ingress_lock);
968 	return 0;
969 }
970 
971 static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
972 				       struct sk_psock *from, int verdict)
973 {
974 	switch (verdict) {
975 	case __SK_REDIRECT:
976 		sk_psock_skb_redirect(from, skb);
977 		break;
978 	case __SK_PASS:
979 	case __SK_DROP:
980 	default:
981 		break;
982 	}
983 }
984 
985 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
986 {
987 	struct bpf_prog *prog;
988 	int ret = __SK_PASS;
989 
990 	rcu_read_lock();
991 	prog = READ_ONCE(psock->progs.stream_verdict);
992 	if (likely(prog)) {
993 		skb->sk = psock->sk;
994 		skb_dst_drop(skb);
995 		skb_bpf_redirect_clear(skb);
996 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
997 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
998 		skb->sk = NULL;
999 	}
1000 	sk_psock_tls_verdict_apply(skb, psock, ret);
1001 	rcu_read_unlock();
1002 	return ret;
1003 }
1004 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
1005 
1006 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
1007 				  int verdict)
1008 {
1009 	struct sock *sk_other;
1010 	int err = 0;
1011 	u32 len, off;
1012 
1013 	switch (verdict) {
1014 	case __SK_PASS:
1015 		err = -EIO;
1016 		sk_other = psock->sk;
1017 		if (sock_flag(sk_other, SOCK_DEAD) ||
1018 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1019 			goto out_free;
1020 
1021 		skb_bpf_set_ingress(skb);
1022 
1023 		/* If the queue is empty then we can submit directly
1024 		 * into the msg queue. If it's not empty we have to
1025 		 * queue work, otherwise we may get OOO data. If
1026 		 * sk_psock_skb_ingress errors, they will be handled by
1027 		 * retrying later from the workqueue.
1028 		 */
1029 		if (skb_queue_empty(&psock->ingress_skb)) {
1030 			len = skb->len;
1031 			off = 0;
1032 			if (skb_bpf_strparser(skb)) {
1033 				struct strp_msg *stm = strp_msg(skb);
1034 
1035 				off = stm->offset;
1036 				len = stm->full_len;
1037 			}
1038 			err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
1039 		}
1040 		if (err < 0) {
1041 			spin_lock_bh(&psock->ingress_lock);
1042 			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1043 				skb_queue_tail(&psock->ingress_skb, skb);
1044 				schedule_delayed_work(&psock->work, 0);
1045 				err = 0;
1046 			}
1047 			spin_unlock_bh(&psock->ingress_lock);
1048 			if (err < 0)
1049 				goto out_free;
1050 		}
1051 		break;
1052 	case __SK_REDIRECT:
1053 		tcp_eat_skb(psock->sk, skb);
1054 		err = sk_psock_skb_redirect(psock, skb);
1055 		break;
1056 	case __SK_DROP:
1057 	default:
1058 out_free:
1059 		skb_bpf_redirect_clear(skb);
1060 		tcp_eat_skb(psock->sk, skb);
1061 		sock_drop(psock->sk, skb);
1062 	}
1063 
1064 	return err;
1065 }
1066 
1067 static void sk_psock_write_space(struct sock *sk)
1068 {
1069 	struct sk_psock *psock;
1070 	void (*write_space)(struct sock *sk) = NULL;
1071 
1072 	rcu_read_lock();
1073 	psock = sk_psock(sk);
1074 	if (likely(psock)) {
1075 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1076 			schedule_delayed_work(&psock->work, 0);
1077 		write_space = psock->saved_write_space;
1078 	}
1079 	rcu_read_unlock();
1080 	if (write_space)
1081 		write_space(sk);
1082 }
1083 
1084 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1085 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1086 {
1087 	struct sk_psock *psock;
1088 	struct bpf_prog *prog;
1089 	int ret = __SK_DROP;
1090 	struct sock *sk;
1091 
1092 	rcu_read_lock();
1093 	sk = strp->sk;
1094 	psock = sk_psock(sk);
1095 	if (unlikely(!psock)) {
1096 		sock_drop(sk, skb);
1097 		goto out;
1098 	}
1099 	prog = READ_ONCE(psock->progs.stream_verdict);
1100 	if (likely(prog)) {
1101 		skb->sk = sk;
1102 		skb_dst_drop(skb);
1103 		skb_bpf_redirect_clear(skb);
1104 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1105 		skb_bpf_set_strparser(skb);
1106 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1107 		skb->sk = NULL;
1108 	}
1109 	sk_psock_verdict_apply(psock, skb, ret);
1110 out:
1111 	rcu_read_unlock();
1112 }
1113 
1114 static int sk_psock_strp_read_done(struct strparser *strp, int err)
1115 {
1116 	return err;
1117 }
1118 
1119 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1120 {
1121 	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1122 	struct bpf_prog *prog;
1123 	int ret = skb->len;
1124 
1125 	rcu_read_lock();
1126 	prog = READ_ONCE(psock->progs.stream_parser);
1127 	if (likely(prog)) {
1128 		skb->sk = psock->sk;
1129 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1130 		skb->sk = NULL;
1131 	}
1132 	rcu_read_unlock();
1133 	return ret;
1134 }
1135 
1136 /* Called with socket lock held. */
1137 static void sk_psock_strp_data_ready(struct sock *sk)
1138 {
1139 	struct sk_psock *psock;
1140 
1141 	trace_sk_data_ready(sk);
1142 
1143 	rcu_read_lock();
1144 	psock = sk_psock(sk);
1145 	if (likely(psock)) {
1146 		if (tls_sw_has_ctx_rx(sk)) {
1147 			psock->saved_data_ready(sk);
1148 		} else {
1149 			read_lock_bh(&sk->sk_callback_lock);
1150 			strp_data_ready(&psock->strp);
1151 			read_unlock_bh(&sk->sk_callback_lock);
1152 		}
1153 	}
1154 	rcu_read_unlock();
1155 }
1156 
1157 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1158 {
1159 	int ret;
1160 
1161 	static const struct strp_callbacks cb = {
1162 		.rcv_msg	= sk_psock_strp_read,
1163 		.read_sock_done	= sk_psock_strp_read_done,
1164 		.parse_msg	= sk_psock_strp_parse,
1165 	};
1166 
1167 	ret = strp_init(&psock->strp, sk, &cb);
1168 	if (!ret)
1169 		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1170 
1171 	if (sk_is_tcp(sk)) {
1172 		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
1173 		psock->copied_seq = tcp_sk(sk)->copied_seq;
1174 	}
1175 	return ret;
1176 }
1177 
1178 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1179 {
1180 	if (psock->saved_data_ready)
1181 		return;
1182 
1183 	psock->saved_data_ready = sk->sk_data_ready;
1184 	sk->sk_data_ready = sk_psock_strp_data_ready;
1185 	sk->sk_write_space = sk_psock_write_space;
1186 }
1187 
1188 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1189 {
1190 	psock_set_prog(&psock->progs.stream_parser, NULL);
1191 
1192 	if (!psock->saved_data_ready)
1193 		return;
1194 
1195 	sk->sk_data_ready = psock->saved_data_ready;
1196 	psock->saved_data_ready = NULL;
1197 	strp_stop(&psock->strp);
1198 }
1199 
1200 static void sk_psock_done_strp(struct sk_psock *psock)
1201 {
1202 	/* Parser has been stopped */
1203 	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1204 		strp_done(&psock->strp);
1205 }
1206 #else
1207 static void sk_psock_done_strp(struct sk_psock *psock)
1208 {
1209 }
1210 #endif /* CONFIG_BPF_STREAM_PARSER */
1211 
1212 static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1213 {
1214 	struct sk_psock *psock;
1215 	struct bpf_prog *prog;
1216 	int ret = __SK_DROP;
1217 	int len = skb->len;
1218 
1219 	rcu_read_lock();
1220 	psock = sk_psock(sk);
1221 	if (unlikely(!psock)) {
1222 		len = 0;
1223 		tcp_eat_skb(sk, skb);
1224 		sock_drop(sk, skb);
1225 		goto out;
1226 	}
1227 	prog = READ_ONCE(psock->progs.stream_verdict);
1228 	if (!prog)
1229 		prog = READ_ONCE(psock->progs.skb_verdict);
1230 	if (likely(prog)) {
1231 		skb_dst_drop(skb);
1232 		skb_bpf_redirect_clear(skb);
1233 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1234 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1235 	}
1236 	ret = sk_psock_verdict_apply(psock, skb, ret);
1237 	if (ret < 0)
1238 		len = ret;
1239 out:
1240 	rcu_read_unlock();
1241 	return len;
1242 }
1243 
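/* data_ready hook installed by sk_psock_start_verdict(): pull skbs from the
 * socket via ->read_skb() and run the verdict program on each through
 * sk_psock_verdict_recv().
 */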
1244 static void sk_psock_verdict_data_ready(struct sock *sk)
1245 {
1246 	struct socket *sock = sk->sk_socket;
1247 	const struct proto_ops *ops;
1248 	int copied;
1249 
1250 	trace_sk_data_ready(sk);
1251 
1252 	if (unlikely(!sock))
1253 		return;
1254 	ops = READ_ONCE(sock->ops);
1255 	if (!ops || !ops->read_skb)
1256 		return;
1257 	copied = ops->read_skb(sk, sk_psock_verdict_recv);
1258 	if (copied >= 0) {
1259 		struct sk_psock *psock;
1260 
1261 		rcu_read_lock();
1262 		psock = sk_psock(sk);
1263 		if (psock)
1264 			sk_psock_data_ready(sk, psock);
1265 		rcu_read_unlock();
1266 	}
1267 }
1268 
1269 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1270 {
1271 	if (psock->saved_data_ready)
1272 		return;
1273 
1274 	psock->saved_data_ready = sk->sk_data_ready;
1275 	sk->sk_data_ready = sk_psock_verdict_data_ready;
1276 	sk->sk_write_space = sk_psock_write_space;
1277 }
1278 
1279 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1280 {
1281 	psock_set_prog(&psock->progs.stream_verdict, NULL);
1282 	psock_set_prog(&psock->progs.skb_verdict, NULL);
1283 
1284 	if (!psock->saved_data_ready)
1285 		return;
1286 
1287 	sk->sk_data_ready = psock->saved_data_ready;
1288 	psock->saved_data_ready = NULL;
1289 }
1290