// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

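/* The scatterlist in a sk_msg is a ring: sg.start chases sg.end around
 * MAX_MSG_FRAGS slots. Coalescing into the last element is only safe when
 * elem_first_coalesce still lies inside the live [start, end) window, which
 * takes two tests depending on whether the ring has wrapped.
 */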
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

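/* Grow @msg to @len bytes of page-frag backed memory, charging the socket
 * for every added byte. New bytes coalesce into the last element when they
 * contiguously extend the same page frag; otherwise a fresh scatterlist slot
 * is consumed. On allocation failure the message is trimmed back to its
 * original size.
 *
 * A minimal usage sketch (illustrative only, not taken from this file):
 *
 *	struct sk_msg msg;
 *
 *	sk_msg_init(&msg);
 *	err = sk_msg_alloc(sk, &msg, want_bytes, msg.sg.start);
 *	if (err)
 *		return err;	// -ENOMEM or -ENOSPC per the code below
 */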
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

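/* Copy @len bytes starting at @off from @src into @dst without copying data:
 * newly used slots take page references via sk_msg_page_add() and @sk is
 * charged for every cloned byte. Returns -ENOSPC if @src runs out before
 * @off + @len bytes or @dst runs out of slots.
 */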
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

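/* The sk_msg_return*() helpers give memory accounting charges back to the
 * socket without freeing pages. sk_msg_return_zero() additionally zeroes the
 * fully consumed elements and advances sg.start, while sk_msg_return() walks
 * every element and only uncharges.
 */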
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

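/* Shrink @msg so that it holds exactly @len bytes, releasing whole trailing
 * elements and then clipping the last partial one. curr/copybreak are pulled
 * back when the trim cuts into data a copy operation had already reached.
 */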
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at the
	 * new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

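/* Pin up to @bytes of user memory from @from straight into @msg without
 * copying. Pages are taken with iov_iter_get_pages2() and installed in the
 * scatterlist one PAGE_SIZE chunk at a time; on error the iterator is
 * reverted so the caller can retry or fall back to a plain copy.
 */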
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

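/* Copy @bytes from @from into the already-allocated buffers of @msg,
 * resuming at sg.curr/copybreak. Returns the number of bytes copied or a
 * negative errno; -ENOSPC means the message had no room left.
 */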
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	u32 copy, buf_size, copied = 0;
	struct scatterlist *sge;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		copied += copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

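/* Core receive loop shared by the BPF recvmsg paths: drain queued sk_msgs
 * from the psock into @msg's iterator. With MSG_PEEK nothing is consumed;
 * otherwise accounting is released per element and fully-read messages are
 * dequeued and freed. When @copied_from_self is non-NULL it reports how many
 * of the copied bytes came from skbs queued by @sk's own protocol stack.
 */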
int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		     int len, int flags, int *copied_from_self)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;
	bool from_self;

	msg_rx = sk_psock_peek_msg(psock);
	if (copied_from_self)
		*copied_from_self = 0;

	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		from_self = msg_rx->sk == sk;
		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			if (copy)
				copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (from_self && copied_from_self)
				*copied_from_self += copy;

			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb) {
					sk_mem_uncharge(sk, copy);
					atomic_sub(copy, &sk->sk_rmem_alloc);
				}
				msg_rx->sg.size -= copy;
				sk_psock_msg_len_add(psock, -copy);

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break out.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	return copied;
}

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	return __sk_msg_recvmsg(sk, psock, msg, len, flags, NULL);
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc_obj(*msg, gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

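/* Map @len bytes of @skb (starting at @off) into @msg's scatterlist, account
 * them, queue the message on the psock ingress list, and wake any reader.
 * Returns the number of bytes enqueued or a negative error; -EAGAIN asks the
 * caller to retry after a failed linearize under memory pressure.
 */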
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg,
					bool take_ref)
{
	int num_sge, copied;

	/* skb_to_sgvec will fail when the total number of fragments in
	 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
	 * caller may aggregate multiple skbs.
	 */
	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we
		 * don't want to drop the skb. We need to linearize the skb
		 * so that the mapping in skb_to_sgvec cannot error.
		 * Note that skb_linearize requires the skb not to be shared.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	psock->ingress_bytes += len;
#endif
	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = take_ref ? skb_get(skb) : skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len, bool take_ref);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition since it is already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len, true);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() in the BPF recvmsg path after it's been copied
	 * into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len, bool take_ref)
{
	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);

	/* This is used in tcp_bpf_recvmsg_parser() to determine whether the
	 * data originates from the socket's own protocol stack. No need to
	 * refcount sk because msg's lifetime is bound to sk via the ingress_msg.
	 */
	msg->sk = sk;
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}

	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->len = len;
		state->off = off;
	}
	spin_unlock_bh(&psock->ingress_lock);
}

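/* Deferred worker that drains psock->ingress_skb. Each skb is either sent
 * out (egress redirect) or turned into an ingress sk_msg; partial progress
 * is saved in psock->work_state so a later run resumes at the right offset.
 */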
static void sk_psock_backlog(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	u32 len = 0, off = 0;
	bool ingress;
	int ret;

	/* If sk is quickly removed from the map and then added back, the old
	 * psock should not be scheduled, because there are now two psocks
	 * pointing to the same sk.
	 */
	if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		return;

	/* Increment the psock refcnt to synchronize with the close(fd) path
	 * in sock_map_close(), ensuring we wait for backlog thread completion
	 * before sk_socket is freed. If the refcnt increment fails, it
	 * indicates sock_map_close() completed with sk_socket potentially
	 * already freed.
	 */
	if (!sk_psock_get(psock->sk))
		return;
	mutex_lock(&psock->work_mutex);
	while ((skb = skb_peek(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}

		/* Resume processing from previous partial state */
		if (unlikely(state->len)) {
			len = state->len;
			off = state->off;
		}

		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, len, off);
					/* Restore redir info we cleared before */
					skb_bpf_set_redir(skb, psock->sk, ingress);
					/* Delay slightly to prioritize any
					 * other work that might be here.
					 */
					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
						schedule_delayed_work(&psock->work, 1);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		/* The entire skb was sent, clear state */
		sk_psock_skb_state(psock, state, 0, 0);
		skb = skb_dequeue(&psock->ingress_skb);
		kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
	sk_psock_put(psock->sk, psock);
}

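/* Attach a freshly allocated psock to @sk under sk_callback_lock. Fails with
 * -EINVAL when a kernel ULP already owns the socket and -EBUSY when
 * sk_user_data is already claimed. Returns the psock or an ERR_PTR().
 */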
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		if (!msg->skb)
			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
		sk_psock_msg_len_add(psock, -msg->sg.size);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	WARN_ON_ONCE(psock->msg_tot_len);
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	if (psock->sk_pair)
		sock_put(psock->sk_pair);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_percpu_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

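/* Fold a BPF program's SK_PASS/SK_DROP return code into the internal
 * __SK_PASS/__SK_REDIRECT/__SK_DROP verdicts; SK_PASS plus a populated
 * redirect target becomes __SK_REDIRECT.
 */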
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

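/* Run the attached msg_parser program on @msg and record the resulting
 * verdict and apply_bytes. On __SK_REDIRECT the old redirect target is
 * dropped and a reference is taken on the one chosen by the program.
 */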
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	msg->sk = NULL;
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

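/* Entry point used by the TLS receive path: run the stream_verdict program
 * on a decrypted record and apply the result. The verdict is also returned
 * so the TLS layer can decide how to dispose of the skb.
 */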
int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Any
		 * errors from sk_psock_skb_ingress_self() are handled
		 * by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0)
				goto out_free;
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			read_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			read_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	int ret;

	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	ret = strp_init(&psock->strp, sk, &cb);
	if (!ret)
		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

	if (sk_is_tcp(sk)) {
		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
		psock->copied_seq = tcp_sk(sk)->copied_seq;
	}
	return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	const struct proto_ops *ops;
	int copied;

	trace_sk_data_ready(sk);

	if (unlikely(!sock))
		return;
	ops = READ_ONCE(sock->ops);
	if (!ops || !ops->read_skb)
		return;
	copied = ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock)
			sk_psock_data_ready(sk, psock);
		rcu_read_unlock();
	}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}