xref: /linux/net/kcm/kcmsock.c (revision d457a0e329b0bfd3a1450e0b1a18cd2b47a25a08)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel Connection Multiplexor
4  *
5  * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
6  */
7 
8 #include <linux/bpf.h>
9 #include <linux/errno.h>
10 #include <linux/errqueue.h>
11 #include <linux/file.h>
12 #include <linux/filter.h>
13 #include <linux/in.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/net.h>
17 #include <linux/netdevice.h>
18 #include <linux/poll.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/socket.h>
22 #include <linux/uaccess.h>
23 #include <linux/workqueue.h>
24 #include <linux/syscalls.h>
25 #include <linux/sched/signal.h>
26 
27 #include <net/kcm.h>
28 #include <net/netns/generic.h>
29 #include <net/sock.h>
30 #include <uapi/linux/kcm.h>
31 #include <trace/events/sock.h>
32 
33 unsigned int kcm_net_id;
34 
35 static struct kmem_cache *kcm_psockp __read_mostly;
36 static struct kmem_cache *kcm_muxp __read_mostly;
37 static struct workqueue_struct *kcm_wq;
38 
39 static inline struct kcm_sock *kcm_sk(const struct sock *sk)
40 {
41 	return (struct kcm_sock *)sk;
42 }
43 
44 static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
45 {
46 	return (struct kcm_tx_msg *)skb->cb;
47 }
48 
49 static void report_csk_error(struct sock *csk, int err)
50 {
51 	csk->sk_err = EPIPE;
52 	sk_error_report(csk);
53 }
54 
55 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
56 			       bool wakeup_kcm)
57 {
58 	struct sock *csk = psock->sk;
59 	struct kcm_mux *mux = psock->mux;
60 
61 	/* Unrecoverable error in transmit */
62 
63 	spin_lock_bh(&mux->lock);
64 
65 	if (psock->tx_stopped) {
66 		spin_unlock_bh(&mux->lock);
67 		return;
68 	}
69 
70 	psock->tx_stopped = 1;
71 	KCM_STATS_INCR(psock->stats.tx_aborts);
72 
73 	if (!psock->tx_kcm) {
74 		/* Take off psocks_avail list */
75 		list_del(&psock->psock_avail_list);
76 	} else if (wakeup_kcm) {
77 		/* In this case psock is being aborted while outside of
78 		 * write_msgs and psock is reserved. Schedule tx_work
79 		 * to handle the failure there. Need to commit tx_stopped
80 		 * before queuing work.
81 		 */
82 		smp_mb();
83 
84 		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
85 	}
86 
87 	spin_unlock_bh(&mux->lock);
88 
89 	/* Report error on lower socket */
90 	report_csk_error(csk, err);
91 }
92 
93 /* RX mux lock held. */
94 static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
95 				    struct kcm_psock *psock)
96 {
97 	STRP_STATS_ADD(mux->stats.rx_bytes,
98 		       psock->strp.stats.bytes -
99 		       psock->saved_rx_bytes);
100 	mux->stats.rx_msgs +=
101 		psock->strp.stats.msgs - psock->saved_rx_msgs;
102 	psock->saved_rx_msgs = psock->strp.stats.msgs;
103 	psock->saved_rx_bytes = psock->strp.stats.bytes;
104 }
105 
106 static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
107 				    struct kcm_psock *psock)
108 {
109 	KCM_STATS_ADD(mux->stats.tx_bytes,
110 		      psock->stats.tx_bytes - psock->saved_tx_bytes);
111 	mux->stats.tx_msgs +=
112 		psock->stats.tx_msgs - psock->saved_tx_msgs;
113 	psock->saved_tx_msgs = psock->stats.tx_msgs;
114 	psock->saved_tx_bytes = psock->stats.tx_bytes;
115 }
116 
117 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
118 
119 /* KCM is ready to receive messages on its queue -- either the KCM is new or
120  * has become unblocked after being blocked on a full socket buffer. Queue any
121  * pending ready messages on a psock. RX mux lock held.
122  */
123 static void kcm_rcv_ready(struct kcm_sock *kcm)
124 {
125 	struct kcm_mux *mux = kcm->mux;
126 	struct kcm_psock *psock;
127 	struct sk_buff *skb;
128 
129 	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
130 		return;
131 
132 	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
133 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
134 			/* Assuming buffer limit has been reached */
135 			skb_queue_head(&mux->rx_hold_queue, skb);
136 			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
137 			return;
138 		}
139 	}
140 
141 	while (!list_empty(&mux->psocks_ready)) {
142 		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
143 					 psock_ready_list);
144 
145 		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
146 			/* Assuming buffer limit has been reached */
147 			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
148 			return;
149 		}
150 
151 		/* Consumed the ready message on the psock. Schedule rx_work to
152 		 * get more messages.
153 		 */
154 		list_del(&psock->psock_ready_list);
155 		psock->ready_rx_msg = NULL;
156 		/* Commit clearing of ready_rx_msg for queuing work */
157 		smp_mb();
158 
159 		strp_unpause(&psock->strp);
160 		strp_check_rcv(&psock->strp);
161 	}
162 
163 	/* Buffer limit is okay now, add to ready list */
164 	list_add_tail(&kcm->wait_rx_list,
165 		      &kcm->mux->kcm_rx_waiters);
166 	/* paired with lockless reads in kcm_rfree() */
167 	WRITE_ONCE(kcm->rx_wait, true);
168 }
169 
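/* skb destructor for messages queued on a KCM socket's receive queue.
 * Uncharges the receive memory and, once the socket has drained below
 * sk_rcvlowat and is neither waiting nor reserved for receive, re-adds
 * the socket to the mux's receive waiters via kcm_rcv_ready().
 */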
170 static void kcm_rfree(struct sk_buff *skb)
171 {
172 	struct sock *sk = skb->sk;
173 	struct kcm_sock *kcm = kcm_sk(sk);
174 	struct kcm_mux *mux = kcm->mux;
175 	unsigned int len = skb->truesize;
176 
177 	sk_mem_uncharge(sk, len);
178 	atomic_sub(len, &sk->sk_rmem_alloc);
179 
180 	/* For reading rx_wait and rx_psock without holding lock */
181 	smp_mb__after_atomic();
182 
183 	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
184 	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
185 		spin_lock_bh(&mux->rx_lock);
186 		kcm_rcv_ready(kcm);
187 		spin_unlock_bh(&mux->rx_lock);
188 	}
189 }
190 
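/* Queue a parsed message on a KCM socket's receive queue, charging it
 * against the socket's receive buffer. Returns -ENOMEM when the buffer
 * limit has been reached or -ENOBUFS if memory cannot be scheduled.
 */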
191 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
192 {
193 	struct sk_buff_head *list = &sk->sk_receive_queue;
194 
195 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
196 		return -ENOMEM;
197 
198 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
199 		return -ENOBUFS;
200 
201 	skb->dev = NULL;
202 
203 	skb_orphan(skb);
204 	skb->sk = sk;
205 	skb->destructor = kcm_rfree;
206 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
207 	sk_mem_charge(sk, skb->truesize);
208 
209 	skb_queue_tail(list, skb);
210 
211 	if (!sock_flag(sk, SOCK_DEAD))
212 		sk->sk_data_ready(sk);
213 
214 	return 0;
215 }
216 
217 /* Requeue received messages for a kcm socket to other kcm sockets. This is
218  * called when a kcm socket is receive disabled.
219  * RX mux lock held.
220  */
221 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
222 {
223 	struct sk_buff *skb;
224 	struct kcm_sock *kcm;
225 
226 	while ((skb = skb_dequeue(head))) {
227 		/* Reset destructor to avoid calling kcm_rcv_ready */
228 		skb->destructor = sock_rfree;
229 		skb_orphan(skb);
230 try_again:
231 		if (list_empty(&mux->kcm_rx_waiters)) {
232 			skb_queue_tail(&mux->rx_hold_queue, skb);
233 			continue;
234 		}
235 
236 		kcm = list_first_entry(&mux->kcm_rx_waiters,
237 				       struct kcm_sock, wait_rx_list);
238 
239 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
240 			/* Should mean socket buffer full */
241 			list_del(&kcm->wait_rx_list);
242 			/* paired with lockless reads in kcm_rfree() */
243 			WRITE_ONCE(kcm->rx_wait, false);
244 
245 			/* Commit rx_wait to read in kcm_free */
246 			smp_wmb();
247 
248 			goto try_again;
249 		}
250 	}
251 }
252 
253 /* Lower sock lock held */
254 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
255 				       struct sk_buff *head)
256 {
257 	struct kcm_mux *mux = psock->mux;
258 	struct kcm_sock *kcm;
259 
260 	WARN_ON(psock->ready_rx_msg);
261 
262 	if (psock->rx_kcm)
263 		return psock->rx_kcm;
264 
265 	spin_lock_bh(&mux->rx_lock);
266 
267 	if (psock->rx_kcm) {
268 		spin_unlock_bh(&mux->rx_lock);
269 		return psock->rx_kcm;
270 	}
271 
272 	kcm_update_rx_mux_stats(mux, psock);
273 
274 	if (list_empty(&mux->kcm_rx_waiters)) {
275 		psock->ready_rx_msg = head;
276 		strp_pause(&psock->strp);
277 		list_add_tail(&psock->psock_ready_list,
278 			      &mux->psocks_ready);
279 		spin_unlock_bh(&mux->rx_lock);
280 		return NULL;
281 	}
282 
283 	kcm = list_first_entry(&mux->kcm_rx_waiters,
284 			       struct kcm_sock, wait_rx_list);
285 	list_del(&kcm->wait_rx_list);
286 	/* paired with lockless reads in kcm_rfree() */
287 	WRITE_ONCE(kcm->rx_wait, false);
288 
289 	psock->rx_kcm = kcm;
290 	/* paired with lockless reads in kcm_rfree() */
291 	WRITE_ONCE(kcm->rx_psock, psock);
292 
293 	spin_unlock_bh(&mux->rx_lock);
294 
295 	return kcm;
296 }
297 
298 static void kcm_done(struct kcm_sock *kcm);
299 
300 static void kcm_done_work(struct work_struct *w)
301 {
302 	kcm_done(container_of(w, struct kcm_sock, done_work));
303 }
304 
305 /* Lower sock held */
306 static void unreserve_rx_kcm(struct kcm_psock *psock,
307 			     bool rcv_ready)
308 {
309 	struct kcm_sock *kcm = psock->rx_kcm;
310 	struct kcm_mux *mux = psock->mux;
311 
312 	if (!kcm)
313 		return;
314 
315 	spin_lock_bh(&mux->rx_lock);
316 
317 	psock->rx_kcm = NULL;
318 	/* paired with lockless reads in kcm_rfree() */
319 	WRITE_ONCE(kcm->rx_psock, NULL);
320 
321 	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
322 	 * kcm_rfree
323 	 */
324 	smp_mb();
325 
326 	if (unlikely(kcm->done)) {
327 		spin_unlock_bh(&mux->rx_lock);
328 
329 		/* Need to run kcm_done in a task since we need to acquire
330 		 * callback locks which may already be held here.
331 		 */
332 		INIT_WORK(&kcm->done_work, kcm_done_work);
333 		schedule_work(&kcm->done_work);
334 		return;
335 	}
336 
337 	if (unlikely(kcm->rx_disabled)) {
338 		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
339 	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
340 		/* Check for the degenerate race with rx_wait where all
341 		 * data was dequeued (accounted for in kcm_rfree).
342 		 */
343 		kcm_rcv_ready(kcm);
344 	}
345 	spin_unlock_bh(&mux->rx_lock);
346 }
347 
348 /* Lower sock lock held */
349 static void psock_data_ready(struct sock *sk)
350 {
351 	struct kcm_psock *psock;
352 
353 	trace_sk_data_ready(sk);
354 
355 	read_lock_bh(&sk->sk_callback_lock);
356 
357 	psock = (struct kcm_psock *)sk->sk_user_data;
358 	if (likely(psock))
359 		strp_data_ready(&psock->strp);
360 
361 	read_unlock_bh(&sk->sk_callback_lock);
362 }
363 
364 /* Called with lower sock held */
365 static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
366 {
367 	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
368 	struct kcm_sock *kcm;
369 
370 try_queue:
371 	kcm = reserve_rx_kcm(psock, skb);
372 	if (!kcm) {
373 		 /* Unable to reserve a KCM, message is held in psock and strp
374 		  * is paused.
375 		  */
376 		return;
377 	}
378 
379 	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
380 		/* Should mean socket buffer full */
381 		unreserve_rx_kcm(psock, false);
382 		goto try_queue;
383 	}
384 }
385 
386 static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
387 {
388 	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
389 	struct bpf_prog *prog = psock->bpf_prog;
390 	int res;
391 
392 	res = bpf_prog_run_pin_on_cpu(prog, skb);
393 	return res;
394 }
395 
396 static int kcm_read_sock_done(struct strparser *strp, int err)
397 {
398 	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
399 
400 	unreserve_rx_kcm(psock, true);
401 
402 	return err;
403 }
404 
405 static void psock_state_change(struct sock *sk)
406 {
407 	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
408 	 * since the application will normally not poll with EPOLLIN
409 	 * on the TCP sockets.
410 	 */
411 
412 	report_csk_error(sk, EPIPE);
413 }
414 
415 static void psock_write_space(struct sock *sk)
416 {
417 	struct kcm_psock *psock;
418 	struct kcm_mux *mux;
419 	struct kcm_sock *kcm;
420 
421 	read_lock_bh(&sk->sk_callback_lock);
422 
423 	psock = (struct kcm_psock *)sk->sk_user_data;
424 	if (unlikely(!psock))
425 		goto out;
426 	mux = psock->mux;
427 
428 	spin_lock_bh(&mux->lock);
429 
430 	/* Check if the socket is reserved; if so, someone is waiting to send. */
431 	kcm = psock->tx_kcm;
432 	if (kcm && !unlikely(kcm->tx_stopped))
433 		queue_work(kcm_wq, &kcm->tx_work);
434 
435 	spin_unlock_bh(&mux->lock);
436 out:
437 	read_unlock_bh(&sk->sk_callback_lock);
438 }
439 
440 static void unreserve_psock(struct kcm_sock *kcm);
441 
442 /* kcm sock is locked. */
443 static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
444 {
445 	struct kcm_mux *mux = kcm->mux;
446 	struct kcm_psock *psock;
447 
448 	psock = kcm->tx_psock;
449 
450 	smp_rmb(); /* Must read tx_psock before tx_wait */
451 
452 	if (psock) {
453 		WARN_ON(kcm->tx_wait);
454 		if (unlikely(psock->tx_stopped))
455 			unreserve_psock(kcm);
456 		else
457 			return kcm->tx_psock;
458 	}
459 
460 	spin_lock_bh(&mux->lock);
461 
462 	/* Check again under lock to see if a psock was reserved for this
463 	 * kcm via unreserve_psock.
464 	 */
465 	psock = kcm->tx_psock;
466 	if (unlikely(psock)) {
467 		WARN_ON(kcm->tx_wait);
468 		spin_unlock_bh(&mux->lock);
469 		return kcm->tx_psock;
470 	}
471 
472 	if (!list_empty(&mux->psocks_avail)) {
473 		psock = list_first_entry(&mux->psocks_avail,
474 					 struct kcm_psock,
475 					 psock_avail_list);
476 		list_del(&psock->psock_avail_list);
477 		if (kcm->tx_wait) {
478 			list_del(&kcm->wait_psock_list);
479 			kcm->tx_wait = false;
480 		}
481 		kcm->tx_psock = psock;
482 		psock->tx_kcm = kcm;
483 		KCM_STATS_INCR(psock->stats.reserved);
484 	} else if (!kcm->tx_wait) {
485 		list_add_tail(&kcm->wait_psock_list,
486 			      &mux->kcm_tx_waiters);
487 		kcm->tx_wait = true;
488 	}
489 
490 	spin_unlock_bh(&mux->lock);
491 
492 	return psock;
493 }
494 
495 /* mux lock held */
496 static void psock_now_avail(struct kcm_psock *psock)
497 {
498 	struct kcm_mux *mux = psock->mux;
499 	struct kcm_sock *kcm;
500 
501 	if (list_empty(&mux->kcm_tx_waiters)) {
502 		list_add_tail(&psock->psock_avail_list,
503 			      &mux->psocks_avail);
504 	} else {
505 		kcm = list_first_entry(&mux->kcm_tx_waiters,
506 				       struct kcm_sock,
507 				       wait_psock_list);
508 		list_del(&kcm->wait_psock_list);
509 		kcm->tx_wait = false;
510 		psock->tx_kcm = kcm;
511 
512 		/* Commit before changing tx_psock since that is read in
513 		 * reserve_psock before queuing work.
514 		 */
515 		smp_mb();
516 
517 		kcm->tx_psock = psock;
518 		KCM_STATS_INCR(psock->stats.reserved);
519 		queue_work(kcm_wq, &kcm->tx_work);
520 	}
521 }
522 
523 /* kcm sock is locked. */
524 static void unreserve_psock(struct kcm_sock *kcm)
525 {
526 	struct kcm_psock *psock;
527 	struct kcm_mux *mux = kcm->mux;
528 
529 	spin_lock_bh(&mux->lock);
530 
531 	psock = kcm->tx_psock;
532 
533 	if (WARN_ON(!psock)) {
534 		spin_unlock_bh(&mux->lock);
535 		return;
536 	}
537 
538 	smp_rmb(); /* Read tx_psock before tx_wait */
539 
540 	kcm_update_tx_mux_stats(mux, psock);
541 
542 	WARN_ON(kcm->tx_wait);
543 
544 	kcm->tx_psock = NULL;
545 	psock->tx_kcm = NULL;
546 	KCM_STATS_INCR(psock->stats.unreserved);
547 
548 	if (unlikely(psock->tx_stopped)) {
549 		if (psock->done) {
550 			/* Deferred free */
551 			list_del(&psock->psock_list);
552 			mux->psocks_cnt--;
553 			sock_put(psock->sk);
554 			fput(psock->sk->sk_socket->file);
555 			kmem_cache_free(kcm_psockp, psock);
556 		}
557 
558 		/* Don't put back on available list */
559 
560 		spin_unlock_bh(&mux->lock);
561 
562 		return;
563 	}
564 
565 	psock_now_avail(psock);
566 
567 	spin_unlock_bh(&mux->lock);
568 }
569 
570 static void kcm_report_tx_retry(struct kcm_sock *kcm)
571 {
572 	struct kcm_mux *mux = kcm->mux;
573 
574 	spin_lock_bh(&mux->lock);
575 	KCM_STATS_INCR(mux->stats.tx_retries);
576 	spin_unlock_bh(&mux->lock);
577 }
578 
579 /* Write any messages ready on the kcm socket.  Called with kcm sock lock
580  * held.  Return bytes actually sent or error.
581  */
582 static int kcm_write_msgs(struct kcm_sock *kcm)
583 {
584 	struct sock *sk = &kcm->sk;
585 	struct kcm_psock *psock;
586 	struct sk_buff *skb, *head;
587 	struct kcm_tx_msg *txm;
588 	unsigned short fragidx, frag_offset;
589 	unsigned int sent, total_sent = 0;
590 	int ret = 0;
591 
592 	kcm->tx_wait_more = false;
593 	psock = kcm->tx_psock;
594 	if (unlikely(psock && psock->tx_stopped)) {
595 		/* A reserved psock was aborted asynchronously. Unreserve
596 		 * it and we'll retry the message.
597 		 */
598 		unreserve_psock(kcm);
599 		kcm_report_tx_retry(kcm);
600 		if (skb_queue_empty(&sk->sk_write_queue))
601 			return 0;
602 
603 		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
604 
605 	} else if (skb_queue_empty(&sk->sk_write_queue)) {
606 		return 0;
607 	}
608 
609 	head = skb_peek(&sk->sk_write_queue);
610 	txm = kcm_tx_msg(head);
611 
612 	if (txm->sent) {
613 		/* Send of first skbuff in queue already in progress */
614 		if (WARN_ON(!psock)) {
615 			ret = -EINVAL;
616 			goto out;
617 		}
618 		sent = txm->sent;
619 		frag_offset = txm->frag_offset;
620 		fragidx = txm->fragidx;
621 		skb = txm->frag_skb;
622 
623 		goto do_frag;
624 	}
625 
626 try_again:
627 	psock = reserve_psock(kcm);
628 	if (!psock)
629 		goto out;
630 
631 	do {
632 		skb = head;
633 		txm = kcm_tx_msg(head);
634 		sent = 0;
635 
636 do_frag_list:
637 		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
638 			ret = -EINVAL;
639 			goto out;
640 		}
641 
642 		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
643 		     fragidx++) {
644 			skb_frag_t *frag;
645 
646 			frag_offset = 0;
647 do_frag:
648 			frag = &skb_shinfo(skb)->frags[fragidx];
649 			if (WARN_ON(!skb_frag_size(frag))) {
650 				ret = -EINVAL;
651 				goto out;
652 			}
653 
654 			ret = kernel_sendpage(psock->sk->sk_socket,
655 					      skb_frag_page(frag),
656 					      skb_frag_off(frag) + frag_offset,
657 					      skb_frag_size(frag) - frag_offset,
658 					      MSG_DONTWAIT);
659 			if (ret <= 0) {
660 				if (ret == -EAGAIN) {
661 					/* Save state to try again when there's
662 					 * write space on the socket
663 					 */
664 					txm->sent = sent;
665 					txm->frag_offset = frag_offset;
666 					txm->fragidx = fragidx;
667 					txm->frag_skb = skb;
668 
669 					ret = 0;
670 					goto out;
671 				}
672 
673 				/* Hard failure in sending message, abort this
674 				 * psock since it has lost framing
675 				 * synchronization and retry sending the
676 				 * message from the beginning.
677 				 */
678 				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
679 						   true);
680 				unreserve_psock(kcm);
681 
682 				txm->sent = 0;
683 				kcm_report_tx_retry(kcm);
684 				ret = 0;
685 
686 				goto try_again;
687 			}
688 
689 			sent += ret;
690 			frag_offset += ret;
691 			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
692 			if (frag_offset < skb_frag_size(frag)) {
693 				/* Not finished with this frag */
694 				goto do_frag;
695 			}
696 		}
697 
698 		if (skb == head) {
699 			if (skb_has_frag_list(skb)) {
700 				skb = skb_shinfo(skb)->frag_list;
701 				goto do_frag_list;
702 			}
703 		} else if (skb->next) {
704 			skb = skb->next;
705 			goto do_frag_list;
706 		}
707 
708 		/* Successfully sent the whole packet, account for it. */
709 		skb_dequeue(&sk->sk_write_queue);
710 		kfree_skb(head);
711 		sk->sk_wmem_queued -= sent;
712 		total_sent += sent;
713 		KCM_STATS_INCR(psock->stats.tx_msgs);
714 	} while ((head = skb_peek(&sk->sk_write_queue)));
715 out:
716 	if (!head) {
717 		/* Done with all queued messages. */
718 		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
719 		unreserve_psock(kcm);
720 	}
721 
722 	/* Check if write space is available */
723 	sk->sk_write_space(sk);
724 
725 	return total_sent ? : ret;
726 }
727 
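/* Workqueue handler to resume transmission on a KCM socket, e.g. after
 * write space becomes available on a psock or after an asynchronous
 * TX abort.
 */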
728 static void kcm_tx_work(struct work_struct *w)
729 {
730 	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
731 	struct sock *sk = &kcm->sk;
732 	int err;
733 
734 	lock_sock(sk);
735 
736 	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
737 	 * aborts
738 	 */
739 	err = kcm_write_msgs(kcm);
740 	if (err < 0) {
741 		/* Hard failure in write, report error on KCM socket */
742 		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
743 		report_csk_error(&kcm->sk, -err);
744 		goto out;
745 	}
746 
747 	/* Primarily for SOCK_SEQPACKET sockets */
748 	if (likely(sk->sk_socket) &&
749 	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
750 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
751 		sk->sk_write_space(sk);
752 	}
753 
754 out:
755 	release_sock(sk);
756 }
757 
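/* Flush messages that were held back waiting for more data (MSG_BATCH). */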
758 static void kcm_push(struct kcm_sock *kcm)
759 {
760 	if (kcm->tx_wait_more)
761 		kcm_write_msgs(kcm);
762 }
763 
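/* Copy user data into a paged skb chain and queue complete messages on
 * sk_write_queue. For SOCK_DGRAM a message ends when MSG_MORE is not
 * set; for SOCK_SEQPACKET it ends with MSG_EOR. MSG_BATCH defers the
 * actual transmit until a later send or kcm_push().
 */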
764 static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
765 {
766 	struct sock *sk = sock->sk;
767 	struct kcm_sock *kcm = kcm_sk(sk);
768 	struct sk_buff *skb = NULL, *head = NULL;
769 	size_t copy, copied = 0;
770 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
771 	int eor = (sock->type == SOCK_DGRAM) ?
772 		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
773 	int err = -EPIPE;
774 
775 	lock_sock(sk);
776 
777 	/* Per tcp_sendmsg this should be in poll */
778 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
779 
780 	if (sk->sk_err)
781 		goto out_error;
782 
783 	if (kcm->seq_skb) {
784 		/* Previously opened message */
785 		head = kcm->seq_skb;
786 		skb = kcm_tx_msg(head)->last_skb;
787 		goto start;
788 	}
789 
790 	/* Call the sk_stream functions to manage the sndbuf mem. */
791 	if (!sk_stream_memory_free(sk)) {
792 		kcm_push(kcm);
793 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
794 		err = sk_stream_wait_memory(sk, &timeo);
795 		if (err)
796 			goto out_error;
797 	}
798 
799 	if (msg_data_left(msg)) {
800 		/* New message, alloc head skb */
801 		head = alloc_skb(0, sk->sk_allocation);
802 		while (!head) {
803 			kcm_push(kcm);
804 			err = sk_stream_wait_memory(sk, &timeo);
805 			if (err)
806 				goto out_error;
807 
808 			head = alloc_skb(0, sk->sk_allocation);
809 		}
810 
811 		skb = head;
812 
813 		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
814 		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
815 		 */
816 		skb->ip_summed = CHECKSUM_UNNECESSARY;
817 	}
818 
819 start:
820 	while (msg_data_left(msg)) {
821 		bool merge = true;
822 		int i = skb_shinfo(skb)->nr_frags;
823 		struct page_frag *pfrag = sk_page_frag(sk);
824 
825 		if (!sk_page_frag_refill(sk, pfrag))
826 			goto wait_for_memory;
827 
828 		if (!skb_can_coalesce(skb, i, pfrag->page,
829 				      pfrag->offset)) {
830 			if (i == MAX_SKB_FRAGS) {
831 				struct sk_buff *tskb;
832 
833 				tskb = alloc_skb(0, sk->sk_allocation);
834 				if (!tskb)
835 					goto wait_for_memory;
836 
837 				if (head == skb)
838 					skb_shinfo(head)->frag_list = tskb;
839 				else
840 					skb->next = tskb;
841 
842 				skb = tskb;
843 				skb->ip_summed = CHECKSUM_UNNECESSARY;
844 				continue;
845 			}
846 			merge = false;
847 		}
848 
849 		if (msg->msg_flags & MSG_SPLICE_PAGES) {
850 			copy = msg_data_left(msg);
851 			if (!sk_wmem_schedule(sk, copy))
852 				goto wait_for_memory;
853 
854 			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
855 						   sk->sk_allocation);
856 			if (err < 0) {
857 				if (err == -EMSGSIZE)
858 					goto wait_for_memory;
859 				goto out_error;
860 			}
861 
862 			copy = err;
863 			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
864 			sk_wmem_queued_add(sk, copy);
865 			sk_mem_charge(sk, copy);
866 
867 			if (head != skb)
868 				head->truesize += copy;
869 		} else {
870 			copy = min_t(int, msg_data_left(msg),
871 				     pfrag->size - pfrag->offset);
872 			if (!sk_wmem_schedule(sk, copy))
873 				goto wait_for_memory;
874 
875 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
876 						       pfrag->page,
877 						       pfrag->offset,
878 						       copy);
879 			if (err)
880 				goto out_error;
881 
882 			/* Update the skb. */
883 			if (merge) {
884 				skb_frag_size_add(
885 					&skb_shinfo(skb)->frags[i - 1], copy);
886 			} else {
887 				skb_fill_page_desc(skb, i, pfrag->page,
888 						   pfrag->offset, copy);
889 				get_page(pfrag->page);
890 			}
891 
892 			pfrag->offset += copy;
893 		}
894 
895 		copied += copy;
896 		if (head != skb) {
897 			head->len += copy;
898 			head->data_len += copy;
899 		}
900 
901 		continue;
902 
903 wait_for_memory:
904 		kcm_push(kcm);
905 		err = sk_stream_wait_memory(sk, &timeo);
906 		if (err)
907 			goto out_error;
908 	}
909 
910 	if (eor) {
911 		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
912 
913 		if (head) {
914 			/* Message complete, queue it on send buffer */
915 			__skb_queue_tail(&sk->sk_write_queue, head);
916 			kcm->seq_skb = NULL;
917 			KCM_STATS_INCR(kcm->stats.tx_msgs);
918 		}
919 
920 		if (msg->msg_flags & MSG_BATCH) {
921 			kcm->tx_wait_more = true;
922 		} else if (kcm->tx_wait_more || not_busy) {
923 			err = kcm_write_msgs(kcm);
924 			if (err < 0) {
925 				/* We got a hard error in write_msgs but have
926 				 * already queued this message. Report an error
927 				 * in the socket, but don't affect return value
928 				 * from sendmsg
929 				 */
930 				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
931 				report_csk_error(&kcm->sk, -err);
932 			}
933 		}
934 	} else {
935 		/* Message not complete, save state */
936 partial_message:
937 		if (head) {
938 			kcm->seq_skb = head;
939 			kcm_tx_msg(head)->last_skb = skb;
940 		}
941 	}
942 
943 	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
944 
945 	release_sock(sk);
946 	return copied;
947 
948 out_error:
949 	kcm_push(kcm);
950 
951 	if (copied && sock->type == SOCK_SEQPACKET) {
952 		/* Wrote some bytes before encountering an
953 		 * error, return partial success.
954 		 */
955 		goto partial_message;
956 	}
957 
958 	if (head != kcm->seq_skb)
959 		kfree_skb(head);
960 
961 	err = sk_stream_error(sk, msg->msg_flags, err);
962 
963 	/* make sure we wake any epoll edge trigger waiter */
964 	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
965 		sk->sk_write_space(sk);
966 
967 	release_sock(sk);
968 	return err;
969 }
970 
971 static void kcm_splice_eof(struct socket *sock)
972 {
973 	struct sock *sk = sock->sk;
974 	struct kcm_sock *kcm = kcm_sk(sk);
975 
976 	if (skb_queue_empty_lockless(&sk->sk_write_queue))
977 		return;
978 
979 	lock_sock(sk);
980 	kcm_write_msgs(kcm);
981 	release_sock(sk);
982 }
983 
984 static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
985 			    int offset, size_t size, int flags)
986 
987 {
988 	struct bio_vec bvec;
989 	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
990 
991 	if (flags & MSG_SENDPAGE_NOTLAST)
992 		msg.msg_flags |= MSG_MORE;
993 
994 	if (flags & MSG_OOB)
995 		return -EOPNOTSUPP;
996 
997 	bvec_set_page(&bvec, page, size, offset);
998 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
999 	return kcm_sendmsg(sock, &msg, size);
1000 }
1001 
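/* Receive a single message as delimited by the strparser. For
 * SOCK_DGRAM a short read truncates the message and sets MSG_TRUNC;
 * MSG_EOR is set once a message has been consumed in full.
 */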
1002 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1003 		       size_t len, int flags)
1004 {
1005 	struct sock *sk = sock->sk;
1006 	struct kcm_sock *kcm = kcm_sk(sk);
1007 	int err = 0;
1008 	struct strp_msg *stm;
1009 	int copied = 0;
1010 	struct sk_buff *skb;
1011 
1012 	skb = skb_recv_datagram(sk, flags, &err);
1013 	if (!skb)
1014 		goto out;
1015 
1016 	/* Okay, have a message on the receive queue */
1017 
1018 	stm = strp_msg(skb);
1019 
1020 	if (len > stm->full_len)
1021 		len = stm->full_len;
1022 
1023 	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
1024 	if (err < 0)
1025 		goto out;
1026 
1027 	copied = len;
1028 	if (likely(!(flags & MSG_PEEK))) {
1029 		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1030 		if (copied < stm->full_len) {
1031 			if (sock->type == SOCK_DGRAM) {
1032 				/* Truncated message */
1033 				msg->msg_flags |= MSG_TRUNC;
1034 				goto msg_finished;
1035 			}
1036 			stm->offset += copied;
1037 			stm->full_len -= copied;
1038 		} else {
1039 msg_finished:
1040 			/* Finished with message */
1041 			msg->msg_flags |= MSG_EOR;
1042 			KCM_STATS_INCR(kcm->stats.rx_msgs);
1043 		}
1044 	}
1045 
1046 out:
1047 	skb_free_datagram(sk, skb);
1048 	return copied ? : err;
1049 }
1050 
1051 static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1052 			       struct pipe_inode_info *pipe, size_t len,
1053 			       unsigned int flags)
1054 {
1055 	struct sock *sk = sock->sk;
1056 	struct kcm_sock *kcm = kcm_sk(sk);
1057 	struct strp_msg *stm;
1058 	int err = 0;
1059 	ssize_t copied;
1060 	struct sk_buff *skb;
1061 
1062 	/* Only support splice for SOCK_SEQPACKET */
1063 
1064 	skb = skb_recv_datagram(sk, flags, &err);
1065 	if (!skb)
1066 		goto err_out;
1067 
1068 	/* Okay, have a message on the receive queue */
1069 
1070 	stm = strp_msg(skb);
1071 
1072 	if (len > stm->full_len)
1073 		len = stm->full_len;
1074 
1075 	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
1076 	if (copied < 0) {
1077 		err = copied;
1078 		goto err_out;
1079 	}
1080 
1081 	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1082 
1083 	stm->offset += copied;
1084 	stm->full_len -= copied;
1085 
1086 	/* We have no way to return MSG_EOR. If all the bytes have been
1087 	 * read we still leave the message in the receive socket buffer.
1088 	 * A subsequent recvmsg needs to be done to return MSG_EOR and
1089 	 * finish reading the message.
1090 	 */
1091 
1092 	skb_free_datagram(sk, skb);
1093 	return copied;
1094 
1095 err_out:
1096 	skb_free_datagram(sk, skb);
1097 	return err;
1098 }
1099 
1100 /* kcm sock lock held */
1101 static void kcm_recv_disable(struct kcm_sock *kcm)
1102 {
1103 	struct kcm_mux *mux = kcm->mux;
1104 
1105 	if (kcm->rx_disabled)
1106 		return;
1107 
1108 	spin_lock_bh(&mux->rx_lock);
1109 
1110 	kcm->rx_disabled = 1;
1111 
1112 	/* If a psock is reserved we'll do cleanup in unreserve */
1113 	if (!kcm->rx_psock) {
1114 		if (kcm->rx_wait) {
1115 			list_del(&kcm->wait_rx_list);
1116 			/* paired with lockless reads in kcm_rfree() */
1117 			WRITE_ONCE(kcm->rx_wait, false);
1118 		}
1119 
1120 		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1121 	}
1122 
1123 	spin_unlock_bh(&mux->rx_lock);
1124 }
1125 
1126 /* kcm sock lock held */
1127 static void kcm_recv_enable(struct kcm_sock *kcm)
1128 {
1129 	struct kcm_mux *mux = kcm->mux;
1130 
1131 	if (!kcm->rx_disabled)
1132 		return;
1133 
1134 	spin_lock_bh(&mux->rx_lock);
1135 
1136 	kcm->rx_disabled = 0;
1137 	kcm_rcv_ready(kcm);
1138 
1139 	spin_unlock_bh(&mux->rx_lock);
1140 }
1141 
1142 static int kcm_setsockopt(struct socket *sock, int level, int optname,
1143 			  sockptr_t optval, unsigned int optlen)
1144 {
1145 	struct kcm_sock *kcm = kcm_sk(sock->sk);
1146 	int val, valbool;
1147 	int err = 0;
1148 
1149 	if (level != SOL_KCM)
1150 		return -ENOPROTOOPT;
1151 
1152 	if (optlen < sizeof(int))
1153 		return -EINVAL;
1154 
1155 	if (copy_from_sockptr(&val, optval, sizeof(int)))
1156 		return -EFAULT;
1157 
1158 	valbool = val ? 1 : 0;
1159 
1160 	switch (optname) {
1161 	case KCM_RECV_DISABLE:
1162 		lock_sock(&kcm->sk);
1163 		if (valbool)
1164 			kcm_recv_disable(kcm);
1165 		else
1166 			kcm_recv_enable(kcm);
1167 		release_sock(&kcm->sk);
1168 		break;
1169 	default:
1170 		err = -ENOPROTOOPT;
1171 	}
1172 
1173 	return err;
1174 }
1175 
1176 static int kcm_getsockopt(struct socket *sock, int level, int optname,
1177 			  char __user *optval, int __user *optlen)
1178 {
1179 	struct kcm_sock *kcm = kcm_sk(sock->sk);
1180 	int val, len;
1181 
1182 	if (level != SOL_KCM)
1183 		return -ENOPROTOOPT;
1184 
1185 	if (get_user(len, optlen))
1186 		return -EFAULT;
1187 
1188 	len = min_t(unsigned int, len, sizeof(int));
1189 	if (len < 0)
1190 		return -EINVAL;
1191 
1192 	switch (optname) {
1193 	case KCM_RECV_DISABLE:
1194 		val = kcm->rx_disabled;
1195 		break;
1196 	default:
1197 		return -ENOPROTOOPT;
1198 	}
1199 
1200 	if (put_user(len, optlen))
1201 		return -EFAULT;
1202 	if (copy_to_user(optval, &val, len))
1203 		return -EFAULT;
1204 	return 0;
1205 }
1206 
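/* Initialize a new KCM socket: attach it to the mux at the lowest free
 * index and mark it ready to receive.
 */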
1207 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1208 {
1209 	struct kcm_sock *tkcm;
1210 	struct list_head *head;
1211 	int index = 0;
1212 
1213 	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1214 	 * we set sk_state, otherwise epoll_wait always returns right away with
1215 	 * EPOLLHUP
1216 	 */
1217 	kcm->sk.sk_state = TCP_ESTABLISHED;
1218 
1219 	/* Add to mux's kcm sockets list */
1220 	kcm->mux = mux;
1221 	spin_lock_bh(&mux->lock);
1222 
1223 	head = &mux->kcm_socks;
1224 	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1225 		if (tkcm->index != index)
1226 			break;
1227 		head = &tkcm->kcm_sock_list;
1228 		index++;
1229 	}
1230 
1231 	list_add(&kcm->kcm_sock_list, head);
1232 	kcm->index = index;
1233 
1234 	mux->kcm_socks_cnt++;
1235 	spin_unlock_bh(&mux->lock);
1236 
1237 	INIT_WORK(&kcm->tx_work, kcm_tx_work);
1238 
1239 	spin_lock_bh(&mux->rx_lock);
1240 	kcm_rcv_ready(kcm);
1241 	spin_unlock_bh(&mux->rx_lock);
1242 }
1243 
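/* Attach a connected TCP socket to the mux as a psock: initialize the
 * strparser with the BPF parser program, take over the TCP socket's
 * callbacks, and make the psock available for transmit on the mux.
 */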
1244 static int kcm_attach(struct socket *sock, struct socket *csock,
1245 		      struct bpf_prog *prog)
1246 {
1247 	struct kcm_sock *kcm = kcm_sk(sock->sk);
1248 	struct kcm_mux *mux = kcm->mux;
1249 	struct sock *csk;
1250 	struct kcm_psock *psock = NULL, *tpsock;
1251 	struct list_head *head;
1252 	int index = 0;
1253 	static const struct strp_callbacks cb = {
1254 		.rcv_msg = kcm_rcv_strparser,
1255 		.parse_msg = kcm_parse_func_strparser,
1256 		.read_sock_done = kcm_read_sock_done,
1257 	};
1258 	int err = 0;
1259 
1260 	csk = csock->sk;
1261 	if (!csk)
1262 		return -EINVAL;
1263 
1264 	lock_sock(csk);
1265 
1266 	/* Only allow TCP sockets to be attached for now */
1267 	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1268 	    csk->sk_protocol != IPPROTO_TCP) {
1269 		err = -EOPNOTSUPP;
1270 		goto out;
1271 	}
1272 
1273 	/* Don't allow listeners or closed sockets */
1274 	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1275 		err = -EOPNOTSUPP;
1276 		goto out;
1277 	}
1278 
1279 	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1280 	if (!psock) {
1281 		err = -ENOMEM;
1282 		goto out;
1283 	}
1284 
1285 	psock->mux = mux;
1286 	psock->sk = csk;
1287 	psock->bpf_prog = prog;
1288 
1289 	write_lock_bh(&csk->sk_callback_lock);
1290 
1291 	/* Check if sk_user_data is already in use by KCM or someone else.
1292 	 * Must be done under lock to prevent race conditions.
1293 	 */
1294 	if (csk->sk_user_data) {
1295 		write_unlock_bh(&csk->sk_callback_lock);
1296 		kmem_cache_free(kcm_psockp, psock);
1297 		err = -EALREADY;
1298 		goto out;
1299 	}
1300 
1301 	err = strp_init(&psock->strp, csk, &cb);
1302 	if (err) {
1303 		write_unlock_bh(&csk->sk_callback_lock);
1304 		kmem_cache_free(kcm_psockp, psock);
1305 		goto out;
1306 	}
1307 
1308 	psock->save_data_ready = csk->sk_data_ready;
1309 	psock->save_write_space = csk->sk_write_space;
1310 	psock->save_state_change = csk->sk_state_change;
1311 	csk->sk_user_data = psock;
1312 	csk->sk_data_ready = psock_data_ready;
1313 	csk->sk_write_space = psock_write_space;
1314 	csk->sk_state_change = psock_state_change;
1315 
1316 	write_unlock_bh(&csk->sk_callback_lock);
1317 
1318 	sock_hold(csk);
1319 
1320 	/* Finished initialization, now add the psock to the MUX. */
1321 	spin_lock_bh(&mux->lock);
1322 	head = &mux->psocks;
1323 	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1324 		if (tpsock->index != index)
1325 			break;
1326 		head = &tpsock->psock_list;
1327 		index++;
1328 	}
1329 
1330 	list_add(&psock->psock_list, head);
1331 	psock->index = index;
1332 
1333 	KCM_STATS_INCR(mux->stats.psock_attach);
1334 	mux->psocks_cnt++;
1335 	psock_now_avail(psock);
1336 	spin_unlock_bh(&mux->lock);
1337 
1338 	/* Schedule RX work in case there are already bytes queued */
1339 	strp_check_rcv(&psock->strp);
1340 
1341 out:
1342 	release_sock(csk);
1343 
1344 	return err;
1345 }
1346 
1347 static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1348 {
1349 	struct socket *csock;
1350 	struct bpf_prog *prog;
1351 	int err;
1352 
1353 	csock = sockfd_lookup(info->fd, &err);
1354 	if (!csock)
1355 		return -ENOENT;
1356 
1357 	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1358 	if (IS_ERR(prog)) {
1359 		err = PTR_ERR(prog);
1360 		goto out;
1361 	}
1362 
1363 	err = kcm_attach(sock, csock, prog);
1364 	if (err) {
1365 		bpf_prog_put(prog);
1366 		goto out;
1367 	}
1368 
1369 	/* Keep reference on file also */
1370 
1371 	return 0;
1372 out:
1373 	sockfd_put(csock);
1374 	return err;
1375 }
1376 
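/* Detach a psock from its mux: restore the TCP socket's callbacks, stop
 * the strparser, drop any held ready message, and either free the psock
 * or defer cleanup to the TX path if it is still reserved by a kcm.
 */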
1377 static void kcm_unattach(struct kcm_psock *psock)
1378 {
1379 	struct sock *csk = psock->sk;
1380 	struct kcm_mux *mux = psock->mux;
1381 
1382 	lock_sock(csk);
1383 
1384 	/* Stop getting callbacks from TCP socket. After this there should
1385 	 * be no way to reserve a kcm for this psock.
1386 	 */
1387 	write_lock_bh(&csk->sk_callback_lock);
1388 	csk->sk_user_data = NULL;
1389 	csk->sk_data_ready = psock->save_data_ready;
1390 	csk->sk_write_space = psock->save_write_space;
1391 	csk->sk_state_change = psock->save_state_change;
1392 	strp_stop(&psock->strp);
1393 
1394 	if (WARN_ON(psock->rx_kcm)) {
1395 		write_unlock_bh(&csk->sk_callback_lock);
1396 		release_sock(csk);
1397 		return;
1398 	}
1399 
1400 	spin_lock_bh(&mux->rx_lock);
1401 
1402 	/* Stop receiver activities. After this point psock should not be
1403 	 * able to get onto ready list either through callbacks or work.
1404 	 */
1405 	if (psock->ready_rx_msg) {
1406 		list_del(&psock->psock_ready_list);
1407 		kfree_skb(psock->ready_rx_msg);
1408 		psock->ready_rx_msg = NULL;
1409 		KCM_STATS_INCR(mux->stats.rx_ready_drops);
1410 	}
1411 
1412 	spin_unlock_bh(&mux->rx_lock);
1413 
1414 	write_unlock_bh(&csk->sk_callback_lock);
1415 
1416 	/* Call strp_done without sock lock */
1417 	release_sock(csk);
1418 	strp_done(&psock->strp);
1419 	lock_sock(csk);
1420 
1421 	bpf_prog_put(psock->bpf_prog);
1422 
1423 	spin_lock_bh(&mux->lock);
1424 
1425 	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1426 	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1427 
1428 	KCM_STATS_INCR(mux->stats.psock_unattach);
1429 
1430 	if (psock->tx_kcm) {
1431 		/* psock was reserved.  Just mark it finished and we will clean
1432 		 * it up in the kcm paths; we need the kcm lock, which cannot be
1433 		 * acquired here.
1434 		 */
1435 		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1436 		spin_unlock_bh(&mux->lock);
1437 
1438 		/* We are unattaching a socket that is reserved. Abort the
1439 		 * socket since we may be out of sync in sending on it. We need
1440 		 * to do this without the mux lock.
1441 		 */
1442 		kcm_abort_tx_psock(psock, EPIPE, false);
1443 
1444 		spin_lock_bh(&mux->lock);
1445 		if (!psock->tx_kcm) {
1446 			/* psock now unreserved in window mux was unlocked */
1447 			/* psock was unreserved in the window the mux was unlocked */
1448 		}
1449 		psock->done = 1;
1450 
1451 		/* Commit done before queuing work to process it */
1452 		smp_mb();
1453 
1454 		/* Queue tx work to make sure psock->done is handled */
1455 		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1456 		spin_unlock_bh(&mux->lock);
1457 	} else {
1458 no_reserved:
1459 		if (!psock->tx_stopped)
1460 			list_del(&psock->psock_avail_list);
1461 		list_del(&psock->psock_list);
1462 		mux->psocks_cnt--;
1463 		spin_unlock_bh(&mux->lock);
1464 
1465 		sock_put(csk);
1466 		fput(csk->sk_socket->file);
1467 		kmem_cache_free(kcm_psockp, psock);
1468 	}
1469 
1470 	release_sock(csk);
1471 }
1472 
1473 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1474 {
1475 	struct kcm_sock *kcm = kcm_sk(sock->sk);
1476 	struct kcm_mux *mux = kcm->mux;
1477 	struct kcm_psock *psock;
1478 	struct socket *csock;
1479 	struct sock *csk;
1480 	int err;
1481 
1482 	csock = sockfd_lookup(info->fd, &err);
1483 	if (!csock)
1484 		return -ENOENT;
1485 
1486 	csk = csock->sk;
1487 	if (!csk) {
1488 		err = -EINVAL;
1489 		goto out;
1490 	}
1491 
1492 	err = -ENOENT;
1493 
1494 	spin_lock_bh(&mux->lock);
1495 
1496 	list_for_each_entry(psock, &mux->psocks, psock_list) {
1497 		if (psock->sk != csk)
1498 			continue;
1499 
1500 		/* Found the matching psock */
1501 
1502 		if (psock->unattaching || WARN_ON(psock->done)) {
1503 			err = -EALREADY;
1504 			break;
1505 		}
1506 
1507 		psock->unattaching = 1;
1508 
1509 		spin_unlock_bh(&mux->lock);
1510 
1511 		/* Lower socket lock should already be held */
1512 		kcm_unattach(psock);
1513 
1514 		err = 0;
1515 		goto out;
1516 	}
1517 
1518 	spin_unlock_bh(&mux->lock);
1519 
1520 out:
1521 	sockfd_put(csock);
1522 	return err;
1523 }
1524 
1525 static struct proto kcm_proto = {
1526 	.name	= "KCM",
1527 	.owner	= THIS_MODULE,
1528 	.obj_size = sizeof(struct kcm_sock),
1529 };
1530 
1531 /* Clone a kcm socket. */
1532 static struct file *kcm_clone(struct socket *osock)
1533 {
1534 	struct socket *newsock;
1535 	struct sock *newsk;
1536 
1537 	newsock = sock_alloc();
1538 	if (!newsock)
1539 		return ERR_PTR(-ENFILE);
1540 
1541 	newsock->type = osock->type;
1542 	newsock->ops = osock->ops;
1543 
1544 	__module_get(newsock->ops->owner);
1545 
1546 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1547 			 &kcm_proto, false);
1548 	if (!newsk) {
1549 		sock_release(newsock);
1550 		return ERR_PTR(-ENOMEM);
1551 	}
1552 	sock_init_data(newsock, newsk);
1553 	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1554 
1555 	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1556 }
1557 
1558 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1559 {
1560 	int err;
1561 
1562 	switch (cmd) {
1563 	case SIOCKCMATTACH: {
1564 		struct kcm_attach info;
1565 
1566 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1567 			return -EFAULT;
1568 
1569 		err = kcm_attach_ioctl(sock, &info);
1570 
1571 		break;
1572 	}
1573 	case SIOCKCMUNATTACH: {
1574 		struct kcm_unattach info;
1575 
1576 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1577 			return -EFAULT;
1578 
1579 		err = kcm_unattach_ioctl(sock, &info);
1580 
1581 		break;
1582 	}
1583 	case SIOCKCMCLONE: {
1584 		struct kcm_clone info;
1585 		struct file *file;
1586 
1587 		info.fd = get_unused_fd_flags(0);
1588 		if (unlikely(info.fd < 0))
1589 			return info.fd;
1590 
1591 		file = kcm_clone(sock);
1592 		if (IS_ERR(file)) {
1593 			put_unused_fd(info.fd);
1594 			return PTR_ERR(file);
1595 		}
1596 		if (copy_to_user((void __user *)arg, &info,
1597 				 sizeof(info))) {
1598 			put_unused_fd(info.fd);
1599 			fput(file);
1600 			return -EFAULT;
1601 		}
1602 		fd_install(info.fd, file);
1603 		err = 0;
1604 		break;
1605 	}
1606 	default:
1607 		err = -ENOIOCTLCMD;
1608 		break;
1609 	}
1610 
1611 	return err;
1612 }
1613 
1614 static void free_mux(struct rcu_head *rcu)
1615 {
1616 	struct kcm_mux *mux = container_of(rcu,
1617 	    struct kcm_mux, rcu);
1618 
1619 	kmem_cache_free(kcm_muxp, mux);
1620 }
1621 
1622 static void release_mux(struct kcm_mux *mux)
1623 {
1624 	struct kcm_net *knet = mux->knet;
1625 	struct kcm_psock *psock, *tmp_psock;
1626 
1627 	/* Release psocks */
1628 	list_for_each_entry_safe(psock, tmp_psock,
1629 				 &mux->psocks, psock_list) {
1630 		if (!WARN_ON(psock->unattaching))
1631 			kcm_unattach(psock);
1632 	}
1633 
1634 	if (WARN_ON(mux->psocks_cnt))
1635 		return;
1636 
1637 	__skb_queue_purge(&mux->rx_hold_queue);
1638 
1639 	mutex_lock(&knet->mutex);
1640 	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1641 	aggregate_psock_stats(&mux->aggregate_psock_stats,
1642 			      &knet->aggregate_psock_stats);
1643 	aggregate_strp_stats(&mux->aggregate_strp_stats,
1644 			     &knet->aggregate_strp_stats);
1645 	list_del_rcu(&mux->kcm_mux_list);
1646 	knet->count--;
1647 	mutex_unlock(&knet->mutex);
1648 
1649 	call_rcu(&mux->rcu, free_mux);
1650 }
1651 
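/* Final teardown of a KCM socket: requeue any pending receive messages
 * to other sockets on the mux, detach from the mux, and release the mux
 * itself if this was its last socket.
 */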
1652 static void kcm_done(struct kcm_sock *kcm)
1653 {
1654 	struct kcm_mux *mux = kcm->mux;
1655 	struct sock *sk = &kcm->sk;
1656 	int socks_cnt;
1657 
1658 	spin_lock_bh(&mux->rx_lock);
1659 	if (kcm->rx_psock) {
1660 		/* Cleanup in unreserve_rx_kcm */
1661 		WARN_ON(kcm->done);
1662 		kcm->rx_disabled = 1;
1663 		kcm->done = 1;
1664 		spin_unlock_bh(&mux->rx_lock);
1665 		return;
1666 	}
1667 
1668 	if (kcm->rx_wait) {
1669 		list_del(&kcm->wait_rx_list);
1670 		/* paired with lockless reads in kcm_rfree() */
1671 		WRITE_ONCE(kcm->rx_wait, false);
1672 	}
1673 	/* Move any pending receive messages to other kcm sockets */
1674 	requeue_rx_msgs(mux, &sk->sk_receive_queue);
1675 
1676 	spin_unlock_bh(&mux->rx_lock);
1677 
1678 	if (WARN_ON(sk_rmem_alloc_get(sk)))
1679 		return;
1680 
1681 	/* Detach from MUX */
1682 	spin_lock_bh(&mux->lock);
1683 
1684 	list_del(&kcm->kcm_sock_list);
1685 	mux->kcm_socks_cnt--;
1686 	socks_cnt = mux->kcm_socks_cnt;
1687 
1688 	spin_unlock_bh(&mux->lock);
1689 
1690 	if (!socks_cnt) {
1691 		/* We are done with the mux now. */
1692 		release_mux(mux);
1693 	}
1694 
1695 	WARN_ON(kcm->rx_wait);
1696 
1697 	sock_put(&kcm->sk);
1698 }
1699 
1700 /* Called by kcm_release to close a KCM socket.
1701  * If this is the last KCM socket on the MUX, destroy the MUX.
1702  */
1703 static int kcm_release(struct socket *sock)
1704 {
1705 	struct sock *sk = sock->sk;
1706 	struct kcm_sock *kcm;
1707 	struct kcm_mux *mux;
1708 	struct kcm_psock *psock;
1709 
1710 	if (!sk)
1711 		return 0;
1712 
1713 	kcm = kcm_sk(sk);
1714 	mux = kcm->mux;
1715 
1716 	lock_sock(sk);
1717 	sock_orphan(sk);
1718 	kfree_skb(kcm->seq_skb);
1719 
1720 	/* Purge queue under lock to avoid race condition with tx_work trying
1721 	 * to act when queue is nonempty. If tx_work runs after this point
1722 	 * it will just return.
1723 	 */
1724 	__skb_queue_purge(&sk->sk_write_queue);
1725 
1726 	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
1727 	 * get a writespace callback. This prevents further work being queued
1728 	 * from the callback (unbinding the psock occurs after canceling work).
1729 	 */
1730 	kcm->tx_stopped = 1;
1731 
1732 	release_sock(sk);
1733 
1734 	spin_lock_bh(&mux->lock);
1735 	if (kcm->tx_wait) {
1736 		/* Take off the tx_wait list; after this point there should be no way
1737 		 * that a psock will be assigned to this kcm.
1738 		 */
1739 		list_del(&kcm->wait_psock_list);
1740 		kcm->tx_wait = false;
1741 	}
1742 	spin_unlock_bh(&mux->lock);
1743 
1744 	/* Cancel work. After this point there should be no outside references
1745 	 * to the kcm socket.
1746 	 */
1747 	cancel_work_sync(&kcm->tx_work);
1748 
1749 	lock_sock(sk);
1750 	psock = kcm->tx_psock;
1751 	if (psock) {
1752 		/* A psock was reserved, so we need to kill it since it
1753 		 * may already have some bytes queued from a message. We
1754 		 * need to do this after removing kcm from tx_wait list.
1755 		 */
1756 		kcm_abort_tx_psock(psock, EPIPE, false);
1757 		unreserve_psock(kcm);
1758 	}
1759 	release_sock(sk);
1760 
1761 	WARN_ON(kcm->tx_wait);
1762 	WARN_ON(kcm->tx_psock);
1763 
1764 	sock->sk = NULL;
1765 
1766 	kcm_done(kcm);
1767 
1768 	return 0;
1769 }
1770 
1771 static const struct proto_ops kcm_dgram_ops = {
1772 	.family =	PF_KCM,
1773 	.owner =	THIS_MODULE,
1774 	.release =	kcm_release,
1775 	.bind =		sock_no_bind,
1776 	.connect =	sock_no_connect,
1777 	.socketpair =	sock_no_socketpair,
1778 	.accept =	sock_no_accept,
1779 	.getname =	sock_no_getname,
1780 	.poll =		datagram_poll,
1781 	.ioctl =	kcm_ioctl,
1782 	.listen =	sock_no_listen,
1783 	.shutdown =	sock_no_shutdown,
1784 	.setsockopt =	kcm_setsockopt,
1785 	.getsockopt =	kcm_getsockopt,
1786 	.sendmsg =	kcm_sendmsg,
1787 	.recvmsg =	kcm_recvmsg,
1788 	.mmap =		sock_no_mmap,
1789 	.splice_eof =	kcm_splice_eof,
1790 	.sendpage =	kcm_sendpage,
1791 };
1792 
1793 static const struct proto_ops kcm_seqpacket_ops = {
1794 	.family =	PF_KCM,
1795 	.owner =	THIS_MODULE,
1796 	.release =	kcm_release,
1797 	.bind =		sock_no_bind,
1798 	.connect =	sock_no_connect,
1799 	.socketpair =	sock_no_socketpair,
1800 	.accept =	sock_no_accept,
1801 	.getname =	sock_no_getname,
1802 	.poll =		datagram_poll,
1803 	.ioctl =	kcm_ioctl,
1804 	.listen =	sock_no_listen,
1805 	.shutdown =	sock_no_shutdown,
1806 	.setsockopt =	kcm_setsockopt,
1807 	.getsockopt =	kcm_getsockopt,
1808 	.sendmsg =	kcm_sendmsg,
1809 	.recvmsg =	kcm_recvmsg,
1810 	.mmap =		sock_no_mmap,
1811 	.splice_eof =	kcm_splice_eof,
1812 	.sendpage =	kcm_sendpage,
1813 	.splice_read =	kcm_splice_read,
1814 };
1815 
1816 /* Create proto operation for kcm sockets */
1817 static int kcm_create(struct net *net, struct socket *sock,
1818 		      int protocol, int kern)
1819 {
1820 	struct kcm_net *knet = net_generic(net, kcm_net_id);
1821 	struct sock *sk;
1822 	struct kcm_mux *mux;
1823 
1824 	switch (sock->type) {
1825 	case SOCK_DGRAM:
1826 		sock->ops = &kcm_dgram_ops;
1827 		break;
1828 	case SOCK_SEQPACKET:
1829 		sock->ops = &kcm_seqpacket_ops;
1830 		break;
1831 	default:
1832 		return -ESOCKTNOSUPPORT;
1833 	}
1834 
1835 	if (protocol != KCMPROTO_CONNECTED)
1836 		return -EPROTONOSUPPORT;
1837 
1838 	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1839 	if (!sk)
1840 		return -ENOMEM;
1841 
1842 	/* Allocate a kcm mux, shared between KCM sockets */
1843 	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1844 	if (!mux) {
1845 		sk_free(sk);
1846 		return -ENOMEM;
1847 	}
1848 
1849 	spin_lock_init(&mux->lock);
1850 	spin_lock_init(&mux->rx_lock);
1851 	INIT_LIST_HEAD(&mux->kcm_socks);
1852 	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1853 	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1854 
1855 	INIT_LIST_HEAD(&mux->psocks);
1856 	INIT_LIST_HEAD(&mux->psocks_ready);
1857 	INIT_LIST_HEAD(&mux->psocks_avail);
1858 
1859 	mux->knet = knet;
1860 
1861 	/* Add new MUX to list */
1862 	mutex_lock(&knet->mutex);
1863 	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1864 	knet->count++;
1865 	mutex_unlock(&knet->mutex);
1866 
1867 	skb_queue_head_init(&mux->rx_hold_queue);
1868 
1869 	/* Init KCM socket */
1870 	sock_init_data(sock, sk);
1871 	init_kcm_sock(kcm_sk(sk), mux);
1872 
1873 	return 0;
1874 }
1875 
1876 static const struct net_proto_family kcm_family_ops = {
1877 	.family = PF_KCM,
1878 	.create = kcm_create,
1879 	.owner  = THIS_MODULE,
1880 };
1881 
1882 static __net_init int kcm_init_net(struct net *net)
1883 {
1884 	struct kcm_net *knet = net_generic(net, kcm_net_id);
1885 
1886 	INIT_LIST_HEAD_RCU(&knet->mux_list);
1887 	mutex_init(&knet->mutex);
1888 
1889 	return 0;
1890 }
1891 
1892 static __net_exit void kcm_exit_net(struct net *net)
1893 {
1894 	struct kcm_net *knet = net_generic(net, kcm_net_id);
1895 
1896 	/* All KCM sockets should be closed at this point, which should mean
1897 	 * that all multiplexors and psocks have been destroyed.
1898 	 */
1899 	WARN_ON(!list_empty(&knet->mux_list));
1900 }
1901 
1902 static struct pernet_operations kcm_net_ops = {
1903 	.init = kcm_init_net,
1904 	.exit = kcm_exit_net,
1905 	.id   = &kcm_net_id,
1906 	.size = sizeof(struct kcm_net),
1907 };
1908 
1909 static int __init kcm_init(void)
1910 {
1911 	int err = -ENOMEM;
1912 
1913 	kcm_muxp = kmem_cache_create("kcm_mux_cache",
1914 				     sizeof(struct kcm_mux), 0,
1915 				     SLAB_HWCACHE_ALIGN, NULL);
1916 	if (!kcm_muxp)
1917 		goto fail;
1918 
1919 	kcm_psockp = kmem_cache_create("kcm_psock_cache",
1920 				       sizeof(struct kcm_psock), 0,
1921 					SLAB_HWCACHE_ALIGN, NULL);
1922 	if (!kcm_psockp)
1923 		goto fail;
1924 
1925 	kcm_wq = create_singlethread_workqueue("kkcmd");
1926 	if (!kcm_wq)
1927 		goto fail;
1928 
1929 	err = proto_register(&kcm_proto, 1);
1930 	if (err)
1931 		goto fail;
1932 
1933 	err = register_pernet_device(&kcm_net_ops);
1934 	if (err)
1935 		goto net_ops_fail;
1936 
1937 	err = sock_register(&kcm_family_ops);
1938 	if (err)
1939 		goto sock_register_fail;
1940 
1941 	err = kcm_proc_init();
1942 	if (err)
1943 		goto proc_init_fail;
1944 
1945 	return 0;
1946 
1947 proc_init_fail:
1948 	sock_unregister(PF_KCM);
1949 
1950 sock_register_fail:
1951 	unregister_pernet_device(&kcm_net_ops);
1952 
1953 net_ops_fail:
1954 	proto_unregister(&kcm_proto);
1955 
1956 fail:
1957 	kmem_cache_destroy(kcm_muxp);
1958 	kmem_cache_destroy(kcm_psockp);
1959 
1960 	if (kcm_wq)
1961 		destroy_workqueue(kcm_wq);
1962 
1963 	return err;
1964 }
1965 
1966 static void __exit kcm_exit(void)
1967 {
1968 	kcm_proc_exit();
1969 	sock_unregister(PF_KCM);
1970 	unregister_pernet_device(&kcm_net_ops);
1971 	proto_unregister(&kcm_proto);
1972 	destroy_workqueue(kcm_wq);
1973 
1974 	kmem_cache_destroy(kcm_muxp);
1975 	kmem_cache_destroy(kcm_psockp);
1976 }
1977 
1978 module_init(kcm_init);
1979 module_exit(kcm_exit);
1980 
1981 MODULE_LICENSE("GPL");
1982 MODULE_ALIAS_NETPROTO(PF_KCM);
1983