// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}
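
/* Note (added for clarity, not in the original source): the sg array is used
 * as a ring, so two cases must be checked. If end > start the used region is
 * contiguous and elem_first_coalesce must fall below end; if end < start the
 * used region wraps around the top of the ring, so elem_first_coalesce may
 * legitimately sit on either side of the wrap point.
 */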

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);
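
/* Usage sketch (illustrative only, not part of this file; local names are
 * hypothetical): a sendmsg hook typically allocates up to the required size
 * and trims back on partial failure, roughly:
 *
 *	osize = msg->sg.size;
 *	ret = sk_msg_alloc(sk, msg, required_size, 0);
 *	if (ret) {
 *		if (ret == -ENOSPC)
 *			// ring full: only part was allocated, trim back
 *			sk_msg_trim(sk, msg, osize);
 *		goto out_err;
 *	}
 */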

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);
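
/* Note (added for clarity): the cloned range is charged to @sk via
 * sk_mem_charge() as it is copied, so a caller that aborts later must return
 * the charge, e.g. by freeing @dst with sk_msg_free().
 */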

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);
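
/* Note (added for clarity): unlike sk_msg_return_zero() above, this variant
 * only returns the memory charge to the socket; it neither zeroes the
 * scatterlist entries nor advances msg->sg.start.
 */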

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb() path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);
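
/* Usage sketch (illustrative only): sk_msg_free() both releases the pages
 * and returns the memory charge, so a failed send path can simply do:
 *
 *	if (err) {
 *		sk_msg_free(sk, msg);
 *		return err;
 *	}
 *
 * sk_msg_free_nocharge() covers the case where the charge was already
 * returned (or never made), e.g. after sk_msg_return().
 */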

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf. */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data by a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at the
	 * new copy location. However, trimmed data that has not yet been
	 * used in a copy op does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);
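
/* Worked example (illustrative only): with three 100-byte elements
 * (sg.size == 300), sk_msg_trim(sk, msg, 150) frees the last element
 * entirely, shortens the middle one to 50 bytes, uncharges the 150 trimmed
 * bytes in total, and pulls sg.end (and, if needed, sg.curr/copybreak)
 * back accordingly.
 */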

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we may
		 * have a copybreak set. In that case clear it and prefer the
		 * zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; the msg will need to use sk_msg_trim()
	 * later if it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
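
/* Usage sketch (illustrative only, local names hypothetical): a sendmsg hook
 * that wants zerocopy pins the user pages straight into @msg and falls back
 * to a plain copy on failure:
 *
 *	ret = sk_msg_zerocopy_from_iter(sk, &msghdr->msg_iter, msg, copy);
 *	if (ret)
 *		// fall back to copy mode, e.g. sk_msg_memcopy_from_iter()
 *		...
 */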

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer. */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
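
/* Note (added for clarity): copying resumes at msg->sg.curr plus
 * msg->sg.copybreak, i.e. where a previous partial copy left off, so callers
 * can invoke this repeatedly as more user data arrives.
 */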

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
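
/* Usage sketch (illustrative only): a protocol recvmsg hook drains the psock
 * ingress queue with this helper, roughly:
 *
 *	lock_sock(sk);
 *	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 *	release_sock(sk);
 *
 * This mirrors how the TCP BPF recvmsg path consumes redirected data.
 */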

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot fail.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and the owner transition since they are
	 * already set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb(), found in __tcp_bpf_recvmsg(), after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r()
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

static void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);
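
/* Usage sketch (illustrative only; the node argument here is a hypothetical
 * choice): callers such as the sockmap update path create the psock and must
 * check for an error pointer, since -EBUSY/-ENOMEM are returned via ERR_PTR:
 *
 *	psock = sk_psock_init(sk, NUMA_NO_NODE);
 *	if (IS_ERR(psock))
 *		return PTR_ERR(psock);
 */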

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_stop(psock, false);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
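
/* Note (added for clarity): teardown is deferred through an RCU work item,
 * so RCU readers that looked up the psock via sk->sk_user_data can finish
 * before sk_psock_destroy() runs.
 */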

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
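
/* Example (illustrative only, not from this file): a minimal BPF msg_parser
 * program whose return value feeds sk_psock_map_verd() above could be:
 *
 *	SEC("sk_msg")
 *	int prog_msg_verdict(struct sk_msg_md *msg)
 *	{
 *		return SK_PASS;
 *	}
 */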

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty we can submit directly into the msg
		 * queue. If it's not empty we have to queue work, otherwise
		 * we may get out-of-order data. If sk_psock_skb_ingress_self()
		 * errors, the skb is queued and retried later from the
		 * workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped. */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* Clone here so sk_eat_skb() in tcp_read_sock() does not drop our data. */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}
1167