xref: /linux/net/mptcp/protocol.c (revision 0061b5199d7c81076181a64529f7a799ebb89399)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
14 #include <net/aligned_data.h>
15 #include <net/rps.h>
16 #include <net/sock.h>
17 #include <net/inet_common.h>
18 #include <net/inet_hashtables.h>
19 #include <net/protocol.h>
20 #include <net/tcp_states.h>
21 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
22 #include <net/transp_v6.h>
23 #endif
24 #include <net/mptcp.h>
25 #include <net/hotdata.h>
26 #include <net/xfrm.h>
27 #include <asm/ioctls.h>
28 #include "protocol.h"
29 #include "mib.h"
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/mptcp.h>
33 
34 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
35 struct mptcp6_sock {
36 	struct mptcp_sock msk;
37 	struct ipv6_pinfo np;
38 };
39 #endif
40 
41 enum {
42 	MPTCP_CMSG_TS = BIT(0),
43 	MPTCP_CMSG_INQ = BIT(1),
44 };
45 
46 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
47 
48 static void __mptcp_destroy_sock(struct sock *sk);
49 static void mptcp_check_send_data_fin(struct sock *sk);
50 
51 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
52 	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
53 };
54 static struct net_device *mptcp_napi_dev;
55 
56 /* Returns end sequence number of the receiver's advertised window */
57 static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
58 {
59 	return READ_ONCE(msk->wnd_end);
60 }
61 
62 static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
63 {
64 	unsigned short family = READ_ONCE(sk->sk_family);
65 
66 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
67 	if (family == AF_INET6)
68 		return &inet6_stream_ops;
69 #endif
70 	WARN_ON_ONCE(family != AF_INET);
71 	return &inet_stream_ops;
72 }
73 
74 bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
75 {
76 	struct net *net = sock_net((struct sock *)msk);
77 
78 	if (__mptcp_check_fallback(msk))
79 		return true;
80 
81 	/* The caller possibly is not holding the msk socket lock, but
82 	 * in the fallback case only the current subflow is touching
83 	 * the OoO queue.
84 	 */
85 	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
86 		return false;
87 
88 	spin_lock_bh(&msk->fallback_lock);
89 	if (!msk->allow_infinite_fallback) {
90 		spin_unlock_bh(&msk->fallback_lock);
91 		return false;
92 	}
93 
94 	msk->allow_subflows = false;
95 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
96 	__MPTCP_INC_STATS(net, fb_mib);
97 	spin_unlock_bh(&msk->fallback_lock);
98 	return true;
99 }
100 
101 static int __mptcp_socket_create(struct mptcp_sock *msk)
102 {
103 	struct mptcp_subflow_context *subflow;
104 	struct sock *sk = (struct sock *)msk;
105 	struct socket *ssock;
106 	int err;
107 
108 	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
109 	if (err)
110 		return err;
111 
112 	msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
113 	WRITE_ONCE(msk->first, ssock->sk);
114 	subflow = mptcp_subflow_ctx(ssock->sk);
115 	list_add(&subflow->node, &msk->conn_list);
116 	sock_hold(ssock->sk);
117 	subflow->request_mptcp = 1;
118 	subflow->subflow_id = msk->subflow_id++;
119 
120 	/* This is the first subflow, always with id 0 */
121 	WRITE_ONCE(subflow->local_id, 0);
122 	mptcp_sock_graft(msk->first, sk->sk_socket);
123 	iput(SOCK_INODE(ssock));
124 
125 	return 0;
126 }
127 
128 /* If the MPC handshake is not started, returns the first subflow,
129  * eventually allocating it.
130  */
131 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
132 {
133 	struct sock *sk = (struct sock *)msk;
134 	int ret;
135 
136 	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
137 		return ERR_PTR(-EINVAL);
138 
139 	if (!msk->first) {
140 		ret = __mptcp_socket_create(msk);
141 		if (ret)
142 			return ERR_PTR(ret);
143 	}
144 
145 	return msk->first;
146 }
147 
148 static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
149 {
150 	sk_drops_skbadd(sk, skb);
151 	__kfree_skb(skb);
152 }
153 
154 static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
155 				 struct sk_buff *from, bool *fragstolen,
156 				 int *delta)
157 {
158 	int limit = READ_ONCE(sk->sk_rcvbuf);
159 
160 	if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
161 	    MPTCP_SKB_CB(from)->offset ||
162 	    ((to->len + from->len) > (limit >> 3)) ||
163 	    !skb_try_coalesce(to, from, fragstolen, delta))
164 		return false;
165 
166 	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
167 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
168 		 to->len, MPTCP_SKB_CB(from)->end_seq);
169 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
170 	return true;
171 }
172 
173 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
174 			       struct sk_buff *from)
175 {
176 	bool fragstolen;
177 	int delta;
178 
179 	if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
180 		return false;
181 
182 	/* note the fwd memory can reach a negative value after accounting
183 	 * for the delta, but the later skb free will restore a non
184 	 * negative one
185 	 */
186 	atomic_add(delta, &sk->sk_rmem_alloc);
187 	sk_mem_charge(sk, delta);
188 	kfree_skb_partial(from, fragstolen);
189 
190 	return true;
191 }
192 
193 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
194 				   struct sk_buff *from)
195 {
196 	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
197 		return false;
198 
199 	return mptcp_try_coalesce((struct sock *)msk, to, from);
200 }
201 
202 /* "inspired" by tcp_rcvbuf_grow(), main difference:
203  * - mptcp does not maintain a msk-level window clamp
204  * - returns true when  the receive buffer is actually updated
205  */
206 static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
207 {
208 	struct mptcp_sock *msk = mptcp_sk(sk);
209 	const struct net *net = sock_net(sk);
210 	u32 rcvwin, rcvbuf, cap, oldval;
211 	u64 grow;
212 
213 	oldval = msk->rcvq_space.space;
214 	msk->rcvq_space.space = newval;
215 	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
216 	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
217 		return false;
218 
219 	/* DRS is always one RTT late. */
220 	rcvwin = newval << 1;
221 
222 	/* slow start: allow the sender to double its rate. */
223 	grow = (u64)rcvwin * (newval - oldval);
224 	do_div(grow, oldval);
225 	rcvwin += grow << 1;
226 
227 	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
228 		rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
229 
230 	cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
231 
232 	rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
233 	if (rcvbuf > sk->sk_rcvbuf) {
234 		WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
235 		return true;
236 	}
237 	return false;
238 }
239 
240 /* "inspired" by tcp_data_queue_ofo(), main differences:
241  * - use mptcp seqs
242  * - don't cope with sacks
243  */
244 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
245 {
246 	struct sock *sk = (struct sock *)msk;
247 	struct rb_node **p, *parent;
248 	u64 seq, end_seq, max_seq;
249 	struct sk_buff *skb1;
250 
251 	seq = MPTCP_SKB_CB(skb)->map_seq;
252 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
253 	max_seq = atomic64_read(&msk->rcv_wnd_sent);
254 
255 	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
256 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
257 	if (after64(end_seq, max_seq)) {
258 		/* out of window */
259 		mptcp_drop(sk, skb);
260 		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
261 			 (unsigned long long)end_seq - (unsigned long)max_seq,
262 			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
263 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
264 		return;
265 	}
266 
267 	p = &msk->out_of_order_queue.rb_node;
268 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
269 	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
270 		rb_link_node(&skb->rbnode, NULL, p);
271 		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
272 		msk->ooo_last_skb = skb;
273 		goto end;
274 	}
275 
276 	/* with 2 subflows, adding at end of ooo queue is quite likely
277 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
278 	 */
279 	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
280 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
281 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
282 		return;
283 	}
284 
285 	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
286 	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
287 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
288 		parent = &msk->ooo_last_skb->rbnode;
289 		p = &parent->rb_right;
290 		goto insert;
291 	}
292 
293 	/* Find place to insert this segment. Handle overlaps on the way. */
294 	parent = NULL;
295 	while (*p) {
296 		parent = *p;
297 		skb1 = rb_to_skb(parent);
298 		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
299 			p = &parent->rb_left;
300 			continue;
301 		}
302 		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
303 			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
304 				/* All the bits are present. Drop. */
305 				mptcp_drop(sk, skb);
306 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
307 				return;
308 			}
309 			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
310 				/* partial overlap:
311 				 *     |     skb      |
312 				 *  |     skb1    |
313 				 * continue traversing
314 				 */
315 			} else {
316 				/* skb's seq == skb1's seq and skb covers skb1.
317 				 * Replace skb1 with skb.
318 				 */
319 				rb_replace_node(&skb1->rbnode, &skb->rbnode,
320 						&msk->out_of_order_queue);
321 				mptcp_drop(sk, skb1);
322 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
323 				goto merge_right;
324 			}
325 		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
326 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
327 			return;
328 		}
329 		p = &parent->rb_right;
330 	}
331 
332 insert:
333 	/* Insert segment into RB tree. */
334 	rb_link_node(&skb->rbnode, parent, p);
335 	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
336 
337 merge_right:
338 	/* Remove other segments covered by skb. */
339 	while ((skb1 = skb_rb_next(skb)) != NULL) {
340 		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
341 			break;
342 		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
343 		mptcp_drop(sk, skb1);
344 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
345 	}
346 	/* If there is no skb after us, we are the last_skb ! */
347 	if (!skb1)
348 		msk->ooo_last_skb = skb;
349 
350 end:
351 	skb_condense(skb);
352 	skb_set_owner_r(skb, sk);
353 	/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
354 	if (sk->sk_socket)
355 		mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
356 }
357 
358 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
359 			   int copy_len)
360 {
361 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
362 	bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
363 
364 	/* the skb map_seq accounts for the skb offset:
365 	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
366 	 * value
367 	 */
368 	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
369 	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
370 	MPTCP_SKB_CB(skb)->offset = offset;
371 	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
372 	MPTCP_SKB_CB(skb)->cant_coalesce = 0;
373 
374 	__skb_unlink(skb, &ssk->sk_receive_queue);
375 
376 	skb_ext_reset(skb);
377 	skb_dst_drop(skb);
378 }
379 
380 static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
381 {
382 	u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
383 	struct mptcp_sock *msk = mptcp_sk(sk);
384 	struct sk_buff *tail;
385 
386 	mptcp_borrow_fwdmem(sk, skb);
387 
388 	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
389 		/* in sequence */
390 		msk->bytes_received += copy_len;
391 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
392 		tail = skb_peek_tail(&sk->sk_receive_queue);
393 		if (tail && mptcp_try_coalesce(sk, tail, skb))
394 			return true;
395 
396 		skb_set_owner_r(skb, sk);
397 		__skb_queue_tail(&sk->sk_receive_queue, skb);
398 		return true;
399 	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
400 		mptcp_data_queue_ofo(msk, skb);
401 		return false;
402 	}
403 
404 	/* old data, keep it simple and drop the whole pkt, sender
405 	 * will retransmit as needed, if needed.
406 	 */
407 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
408 	mptcp_drop(sk, skb);
409 	return false;
410 }
411 
412 static void mptcp_stop_rtx_timer(struct sock *sk)
413 {
414 	sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
415 	mptcp_sk(sk)->timer_ival = 0;
416 }
417 
418 static void mptcp_close_wake_up(struct sock *sk)
419 {
420 	if (sock_flag(sk, SOCK_DEAD))
421 		return;
422 
423 	sk->sk_state_change(sk);
424 	if (sk->sk_shutdown == SHUTDOWN_MASK ||
425 	    sk->sk_state == TCP_CLOSE)
426 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
427 	else
428 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
429 }
430 
431 static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
432 {
433 	struct mptcp_subflow_context *subflow;
434 
435 	mptcp_for_each_subflow(msk, subflow) {
436 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
437 		bool slow;
438 
439 		slow = lock_sock_fast(ssk);
440 		tcp_shutdown(ssk, SEND_SHUTDOWN);
441 		unlock_sock_fast(ssk, slow);
442 	}
443 }
444 
445 /* called under the msk socket lock */
446 static bool mptcp_pending_data_fin_ack(struct sock *sk)
447 {
448 	struct mptcp_sock *msk = mptcp_sk(sk);
449 
450 	return ((1 << sk->sk_state) &
451 		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
452 	       msk->write_seq == READ_ONCE(msk->snd_una);
453 }
454 
455 static void mptcp_check_data_fin_ack(struct sock *sk)
456 {
457 	struct mptcp_sock *msk = mptcp_sk(sk);
458 
459 	/* Look for an acknowledged DATA_FIN */
460 	if (mptcp_pending_data_fin_ack(sk)) {
461 		WRITE_ONCE(msk->snd_data_fin_enable, 0);
462 
463 		switch (sk->sk_state) {
464 		case TCP_FIN_WAIT1:
465 			mptcp_set_state(sk, TCP_FIN_WAIT2);
466 			break;
467 		case TCP_CLOSING:
468 		case TCP_LAST_ACK:
469 			mptcp_shutdown_subflows(msk);
470 			mptcp_set_state(sk, TCP_CLOSE);
471 			break;
472 		}
473 
474 		mptcp_close_wake_up(sk);
475 	}
476 }
477 
478 /* can be called with no lock acquired */
479 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
480 {
481 	struct mptcp_sock *msk = mptcp_sk(sk);
482 
483 	if (READ_ONCE(msk->rcv_data_fin) &&
484 	    ((1 << inet_sk_state_load(sk)) &
485 	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
486 		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
487 
488 		if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
489 			if (seq)
490 				*seq = rcv_data_fin_seq;
491 
492 			return true;
493 		}
494 	}
495 
496 	return false;
497 }
498 
499 static void mptcp_set_datafin_timeout(struct sock *sk)
500 {
501 	struct inet_connection_sock *icsk = inet_csk(sk);
502 	u32 retransmits;
503 
504 	retransmits = min_t(u32, icsk->icsk_retransmits,
505 			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));
506 
507 	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
508 }
509 
510 static void __mptcp_set_timeout(struct sock *sk, long tout)
511 {
512 	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
513 }
514 
515 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
516 {
517 	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
518 
519 	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
520 	       tcp_timeout_expires(ssk) - jiffies : 0;
521 }
522 
523 static void mptcp_set_timeout(struct sock *sk)
524 {
525 	struct mptcp_subflow_context *subflow;
526 	long tout = 0;
527 
528 	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
529 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
530 	__mptcp_set_timeout(sk, tout);
531 }
532 
533 static inline bool tcp_can_send_ack(const struct sock *ssk)
534 {
535 	return !((1 << inet_sk_state_load(ssk)) &
536 	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
537 }
538 
539 void __mptcp_subflow_send_ack(struct sock *ssk)
540 {
541 	if (tcp_can_send_ack(ssk))
542 		tcp_send_ack(ssk);
543 }
544 
545 static void mptcp_subflow_send_ack(struct sock *ssk)
546 {
547 	bool slow;
548 
549 	slow = lock_sock_fast(ssk);
550 	__mptcp_subflow_send_ack(ssk);
551 	unlock_sock_fast(ssk, slow);
552 }
553 
554 static void mptcp_send_ack(struct mptcp_sock *msk)
555 {
556 	struct mptcp_subflow_context *subflow;
557 
558 	mptcp_for_each_subflow(msk, subflow)
559 		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
560 }
561 
562 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
563 {
564 	bool slow;
565 
566 	slow = lock_sock_fast(ssk);
567 	if (tcp_can_send_ack(ssk))
568 		tcp_cleanup_rbuf(ssk, copied);
569 	unlock_sock_fast(ssk, slow);
570 }
571 
572 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
573 {
574 	const struct inet_connection_sock *icsk = inet_csk(ssk);
575 	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
576 	const struct tcp_sock *tp = tcp_sk(ssk);
577 
578 	return (ack_pending & ICSK_ACK_SCHED) &&
579 		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
580 		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
581 		 (rx_empty && ack_pending &
582 			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
583 }
584 
585 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
586 {
587 	int old_space = READ_ONCE(msk->old_wspace);
588 	struct mptcp_subflow_context *subflow;
589 	struct sock *sk = (struct sock *)msk;
590 	int space =  __mptcp_space(sk);
591 	bool cleanup, rx_empty;
592 
593 	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
594 	rx_empty = !sk_rmem_alloc_get(sk) && copied;
595 
596 	mptcp_for_each_subflow(msk, subflow) {
597 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
598 
599 		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
600 			mptcp_subflow_cleanup_rbuf(ssk, copied);
601 	}
602 }
603 
604 static void mptcp_check_data_fin(struct sock *sk)
605 {
606 	struct mptcp_sock *msk = mptcp_sk(sk);
607 	u64 rcv_data_fin_seq;
608 
609 	/* Need to ack a DATA_FIN received from a peer while this side
610 	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
611 	 * msk->rcv_data_fin was set when parsing the incoming options
612 	 * at the subflow level and the msk lock was not held, so this
613 	 * is the first opportunity to act on the DATA_FIN and change
614 	 * the msk state.
615 	 *
616 	 * If we are caught up to the sequence number of the incoming
617 	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
618 	 * not caught up, do nothing and let the recv code send DATA_ACK
619 	 * when catching up.
620 	 */
621 
622 	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
623 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
624 		WRITE_ONCE(msk->rcv_data_fin, 0);
625 
626 		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
627 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
628 
629 		switch (sk->sk_state) {
630 		case TCP_ESTABLISHED:
631 			mptcp_set_state(sk, TCP_CLOSE_WAIT);
632 			break;
633 		case TCP_FIN_WAIT1:
634 			mptcp_set_state(sk, TCP_CLOSING);
635 			break;
636 		case TCP_FIN_WAIT2:
637 			mptcp_shutdown_subflows(msk);
638 			mptcp_set_state(sk, TCP_CLOSE);
639 			break;
640 		default:
641 			/* Other states not expected */
642 			WARN_ON_ONCE(1);
643 			break;
644 		}
645 
646 		if (!__mptcp_check_fallback(msk))
647 			mptcp_send_ack(msk);
648 		mptcp_close_wake_up(sk);
649 	}
650 }
651 
652 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
653 {
654 	if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
655 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
656 		mptcp_subflow_reset(ssk);
657 	}
658 }
659 
660 static void __mptcp_add_backlog(struct sock *sk,
661 				struct mptcp_subflow_context *subflow,
662 				struct sk_buff *skb)
663 {
664 	struct mptcp_sock *msk = mptcp_sk(sk);
665 	struct sk_buff *tail = NULL;
666 	struct sock *ssk = skb->sk;
667 	bool fragstolen;
668 	int delta;
669 
670 	if (unlikely(sk->sk_state == TCP_CLOSE)) {
671 		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
672 		return;
673 	}
674 
675 	/* Try to coalesce with the last skb in our backlog */
676 	if (!list_empty(&msk->backlog_list))
677 		tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
678 
679 	if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
680 	    ssk == tail->sk &&
681 	    __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
682 		skb->truesize -= delta;
683 		kfree_skb_partial(skb, fragstolen);
684 		__mptcp_subflow_lend_fwdmem(subflow, delta);
685 		goto account;
686 	}
687 
688 	list_add_tail(&skb->list, &msk->backlog_list);
689 	mptcp_subflow_lend_fwdmem(subflow, skb);
690 	delta = skb->truesize;
691 
692 account:
693 	WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
694 
695 	/* Possibly not accept()ed yet, keep track of memory not CG
696 	 * accounted, mptcp_graft_subflows() will handle it.
697 	 */
698 	if (!mem_cgroup_from_sk(ssk))
699 		msk->backlog_unaccounted += delta;
700 }
701 
702 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
703 					   struct sock *ssk, bool own_msk)
704 {
705 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
706 	struct sock *sk = (struct sock *)msk;
707 	bool more_data_avail;
708 	struct tcp_sock *tp;
709 	bool ret = false;
710 
711 	pr_debug("msk=%p ssk=%p\n", msk, ssk);
712 	tp = tcp_sk(ssk);
713 	do {
714 		u32 map_remaining, offset;
715 		u32 seq = tp->copied_seq;
716 		struct sk_buff *skb;
717 		bool fin;
718 
719 		/* try to move as much data as available */
720 		map_remaining = subflow->map_data_len -
721 				mptcp_subflow_get_map_offset(subflow);
722 
723 		skb = skb_peek(&ssk->sk_receive_queue);
724 		if (unlikely(!skb))
725 			break;
726 
727 		if (__mptcp_check_fallback(msk)) {
728 			/* Under fallback skbs have no MPTCP extension and TCP could
729 			 * collapse them between the dummy map creation and the
730 			 * current dequeue. Be sure to adjust the map size.
731 			 */
732 			map_remaining = skb->len;
733 			subflow->map_data_len = skb->len;
734 		}
735 
736 		offset = seq - TCP_SKB_CB(skb)->seq;
737 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
738 		if (fin)
739 			seq++;
740 
741 		if (offset < skb->len) {
742 			size_t len = skb->len - offset;
743 
744 			mptcp_init_skb(ssk, skb, offset, len);
745 
746 			if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
747 				mptcp_subflow_lend_fwdmem(subflow, skb);
748 				ret |= __mptcp_move_skb(sk, skb);
749 			} else {
750 				__mptcp_add_backlog(sk, subflow, skb);
751 			}
752 			seq += len;
753 
754 			if (unlikely(map_remaining < len)) {
755 				DEBUG_NET_WARN_ON_ONCE(1);
756 				mptcp_dss_corruption(msk, ssk);
757 			}
758 		} else {
759 			if (unlikely(!fin)) {
760 				DEBUG_NET_WARN_ON_ONCE(1);
761 				mptcp_dss_corruption(msk, ssk);
762 			}
763 
764 			sk_eat_skb(ssk, skb);
765 		}
766 
767 		WRITE_ONCE(tp->copied_seq, seq);
768 		more_data_avail = mptcp_subflow_data_available(ssk);
769 
770 	} while (more_data_avail);
771 
772 	if (ret)
773 		msk->last_data_recv = tcp_jiffies32;
774 	return ret;
775 }
776 
777 static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
778 {
779 	struct sock *sk = (struct sock *)msk;
780 	struct sk_buff *skb, *tail;
781 	bool moved = false;
782 	struct rb_node *p;
783 	u64 end_seq;
784 
785 	p = rb_first(&msk->out_of_order_queue);
786 	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
787 	while (p) {
788 		skb = rb_to_skb(p);
789 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
790 			break;
791 
792 		p = rb_next(p);
793 		rb_erase(&skb->rbnode, &msk->out_of_order_queue);
794 
795 		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
796 				      msk->ack_seq))) {
797 			mptcp_drop(sk, skb);
798 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
799 			continue;
800 		}
801 
802 		end_seq = MPTCP_SKB_CB(skb)->end_seq;
803 		tail = skb_peek_tail(&sk->sk_receive_queue);
804 		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
805 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
806 
807 			/* skip overlapping data, if any */
808 			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
809 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
810 				 delta);
811 			MPTCP_SKB_CB(skb)->offset += delta;
812 			MPTCP_SKB_CB(skb)->map_seq += delta;
813 			__skb_queue_tail(&sk->sk_receive_queue, skb);
814 		}
815 		msk->bytes_received += end_seq - msk->ack_seq;
816 		WRITE_ONCE(msk->ack_seq, end_seq);
817 		moved = true;
818 	}
819 	return moved;
820 }
821 
822 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
823 {
824 	int ssk_state;
825 	int err;
826 
827 	/* only propagate errors on fallen-back sockets or
828 	 * on MPC connect
829 	 */
830 	if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
831 		return false;
832 
833 	err = sock_error(ssk);
834 	if (!err)
835 		return false;
836 
837 	/* We need to propagate only transition to CLOSE state.
838 	 * Orphaned socket will see such state change via
839 	 * subflow_sched_work_if_closed() and that path will properly
840 	 * destroy the msk as needed.
841 	 */
842 	ssk_state = inet_sk_state_load(ssk);
843 	if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
844 		mptcp_set_state(sk, ssk_state);
845 	WRITE_ONCE(sk->sk_err, -err);
846 
847 	/* This barrier is coupled with smp_rmb() in mptcp_poll() */
848 	smp_wmb();
849 	sk_error_report(sk);
850 	return true;
851 }
852 
853 void __mptcp_error_report(struct sock *sk)
854 {
855 	struct mptcp_subflow_context *subflow;
856 	struct mptcp_sock *msk = mptcp_sk(sk);
857 
858 	mptcp_for_each_subflow(msk, subflow)
859 		if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
860 			break;
861 }
862 
863 /* In most cases we will be able to lock the mptcp socket.  If its already
864  * owned, we need to defer to the work queue to avoid ABBA deadlock.
865  */
866 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
867 {
868 	struct sock *sk = (struct sock *)msk;
869 	bool moved;
870 
871 	moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
872 	__mptcp_ofo_queue(msk);
873 	if (unlikely(ssk->sk_err))
874 		__mptcp_subflow_error_report(sk, ssk);
875 
876 	/* If the moves have caught up with the DATA_FIN sequence number
877 	 * it's time to ack the DATA_FIN and change socket state, but
878 	 * this is not a good place to change state. Let the workqueue
879 	 * do it.
880 	 */
881 	if (mptcp_pending_data_fin(sk, NULL))
882 		mptcp_schedule_work(sk);
883 	return moved;
884 }
885 
886 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
887 {
888 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
889 	struct mptcp_sock *msk = mptcp_sk(sk);
890 
891 	/* The peer can send data while we are shutting down this
892 	 * subflow at subflow destruction time, but we must avoid enqueuing
893 	 * more data to the msk receive queue
894 	 */
895 	if (unlikely(subflow->closing))
896 		return;
897 
898 	mptcp_data_lock(sk);
899 	if (!sock_owned_by_user(sk)) {
900 		/* Wake-up the reader only for in-sequence data */
901 		if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
902 			sk->sk_data_ready(sk);
903 	} else {
904 		__mptcp_move_skbs_from_subflow(msk, ssk, false);
905 	}
906 	mptcp_data_unlock(sk);
907 }
908 
909 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
910 {
911 	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
912 	msk->allow_infinite_fallback = false;
913 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
914 }
915 
916 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
917 {
918 	struct sock *sk = (struct sock *)msk;
919 
920 	if (sk->sk_state != TCP_ESTABLISHED)
921 		return false;
922 
923 	spin_lock_bh(&msk->fallback_lock);
924 	if (!msk->allow_subflows) {
925 		spin_unlock_bh(&msk->fallback_lock);
926 		return false;
927 	}
928 	mptcp_subflow_joined(msk, ssk);
929 	spin_unlock_bh(&msk->fallback_lock);
930 
931 	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
932 	mptcp_sockopt_sync_locked(msk, ssk);
933 	mptcp_stop_tout_timer(sk);
934 	__mptcp_propagate_sndbuf(sk, ssk);
935 	return true;
936 }
937 
938 static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
939 {
940 	struct mptcp_subflow_context *tmp, *subflow;
941 	struct mptcp_sock *msk = mptcp_sk(sk);
942 
943 	list_for_each_entry_safe(subflow, tmp, join_list, node) {
944 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
945 		bool slow = lock_sock_fast(ssk);
946 
947 		list_move_tail(&subflow->node, &msk->conn_list);
948 		if (!__mptcp_finish_join(msk, ssk))
949 			mptcp_subflow_reset(ssk);
950 		unlock_sock_fast(ssk, slow);
951 	}
952 }
953 
954 static bool mptcp_rtx_timer_pending(struct sock *sk)
955 {
956 	return timer_pending(&sk->mptcp_retransmit_timer);
957 }
958 
959 static void mptcp_reset_rtx_timer(struct sock *sk)
960 {
961 	unsigned long tout;
962 
963 	/* prevent rescheduling on close */
964 	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
965 		return;
966 
967 	tout = mptcp_sk(sk)->timer_ival;
968 	sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
969 }
970 
971 bool mptcp_schedule_work(struct sock *sk)
972 {
973 	if (inet_sk_state_load(sk) == TCP_CLOSE)
974 		return false;
975 
976 	/* Get a reference on this socket, mptcp_worker() will release it.
977 	 * As mptcp_worker() might complete before us, we can not avoid
978 	 * a sock_hold()/sock_put() if schedule_work() returns false.
979 	 */
980 	sock_hold(sk);
981 
982 	if (schedule_work(&mptcp_sk(sk)->work))
983 		return true;
984 
985 	sock_put(sk);
986 	return false;
987 }
988 
989 static bool mptcp_skb_can_collapse_to(u64 write_seq,
990 				      const struct sk_buff *skb,
991 				      const struct mptcp_ext *mpext)
992 {
993 	if (!tcp_skb_can_collapse_to(skb))
994 		return false;
995 
996 	/* can collapse only if MPTCP level sequence is in order and this
997 	 * mapping has not been xmitted yet
998 	 */
999 	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
1000 	       !mpext->frozen;
1001 }
1002 
1003 /* we can append data to the given data frag if:
1004  * - there is space available in the backing page_frag
1005  * - the data frag tail matches the current page_frag free offset
1006  * - the data frag end sequence number matches the current write seq
1007  */
1008 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
1009 				       const struct page_frag *pfrag,
1010 				       const struct mptcp_data_frag *df)
1011 {
1012 	return df && pfrag->page == df->page &&
1013 		pfrag->size - pfrag->offset > 0 &&
1014 		pfrag->offset == (df->offset + df->data_len) &&
1015 		df->data_seq + df->data_len == msk->write_seq;
1016 }
1017 
1018 static void dfrag_uncharge(struct sock *sk, int len)
1019 {
1020 	sk_mem_uncharge(sk, len);
1021 	sk_wmem_queued_add(sk, -len);
1022 }
1023 
1024 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
1025 {
1026 	int len = dfrag->data_len + dfrag->overhead;
1027 
1028 	list_del(&dfrag->list);
1029 	dfrag_uncharge(sk, len);
1030 	put_page(dfrag->page);
1031 }
1032 
1033 /* called under both the msk socket lock and the data lock */
1034 static void __mptcp_clean_una(struct sock *sk)
1035 {
1036 	struct mptcp_sock *msk = mptcp_sk(sk);
1037 	struct mptcp_data_frag *dtmp, *dfrag;
1038 	u64 snd_una;
1039 
1040 	snd_una = msk->snd_una;
1041 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
1042 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
1043 			break;
1044 
1045 		if (unlikely(dfrag == msk->first_pending)) {
1046 			/* in recovery mode can see ack after the current snd head */
1047 			if (WARN_ON_ONCE(!msk->recovery))
1048 				break;
1049 
1050 			msk->first_pending = mptcp_send_next(sk);
1051 		}
1052 
1053 		dfrag_clear(sk, dfrag);
1054 	}
1055 
1056 	dfrag = mptcp_rtx_head(sk);
1057 	if (dfrag && after64(snd_una, dfrag->data_seq)) {
1058 		u64 delta = snd_una - dfrag->data_seq;
1059 
1060 		/* prevent wrap around in recovery mode */
1061 		if (unlikely(delta > dfrag->already_sent)) {
1062 			if (WARN_ON_ONCE(!msk->recovery))
1063 				goto out;
1064 			if (WARN_ON_ONCE(delta > dfrag->data_len))
1065 				goto out;
1066 			dfrag->already_sent += delta - dfrag->already_sent;
1067 		}
1068 
1069 		dfrag->data_seq += delta;
1070 		dfrag->offset += delta;
1071 		dfrag->data_len -= delta;
1072 		dfrag->already_sent -= delta;
1073 
1074 		dfrag_uncharge(sk, delta);
1075 	}
1076 
1077 	/* all retransmitted data acked, recovery completed */
1078 	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
1079 		msk->recovery = false;
1080 
1081 out:
1082 	if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
1083 		if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
1084 			mptcp_stop_rtx_timer(sk);
1085 	} else {
1086 		mptcp_reset_rtx_timer(sk);
1087 	}
1088 
1089 	if (mptcp_pending_data_fin_ack(sk))
1090 		mptcp_schedule_work(sk);
1091 }
1092 
1093 static void __mptcp_clean_una_wakeup(struct sock *sk)
1094 {
1095 	lockdep_assert_held_once(&sk->sk_lock.slock);
1096 
1097 	__mptcp_clean_una(sk);
1098 	mptcp_write_space(sk);
1099 }
1100 
1101 static void mptcp_clean_una_wakeup(struct sock *sk)
1102 {
1103 	mptcp_data_lock(sk);
1104 	__mptcp_clean_una_wakeup(sk);
1105 	mptcp_data_unlock(sk);
1106 }
1107 
1108 static void mptcp_enter_memory_pressure(struct sock *sk)
1109 {
1110 	struct mptcp_subflow_context *subflow;
1111 	struct mptcp_sock *msk = mptcp_sk(sk);
1112 	bool first = true;
1113 
1114 	mptcp_for_each_subflow(msk, subflow) {
1115 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1116 
1117 		if (first && !ssk->sk_bypass_prot_mem) {
1118 			tcp_enter_memory_pressure(ssk);
1119 			first = false;
1120 		}
1121 
1122 		sk_stream_moderate_sndbuf(ssk);
1123 	}
1124 	__mptcp_sync_sndbuf(sk);
1125 }
1126 
1127 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1128  * data
1129  */
1130 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1131 {
1132 	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
1133 					pfrag, sk->sk_allocation)))
1134 		return true;
1135 
1136 	mptcp_enter_memory_pressure(sk);
1137 	return false;
1138 }
1139 
1140 static struct mptcp_data_frag *
1141 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
1142 		      int orig_offset)
1143 {
1144 	int offset = ALIGN(orig_offset, sizeof(long));
1145 	struct mptcp_data_frag *dfrag;
1146 
1147 	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
1148 	dfrag->data_len = 0;
1149 	dfrag->data_seq = msk->write_seq;
1150 	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
1151 	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
1152 	dfrag->already_sent = 0;
1153 	dfrag->page = pfrag->page;
1154 
1155 	return dfrag;
1156 }
1157 
1158 struct mptcp_sendmsg_info {
1159 	int mss_now;
1160 	int size_goal;
1161 	u16 limit;
1162 	u16 sent;
1163 	unsigned int flags;
1164 	bool data_lock_held;
1165 };
1166 
1167 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
1168 				    u64 data_seq, int avail_size)
1169 {
1170 	u64 window_end = mptcp_wnd_end(msk);
1171 	u64 mptcp_snd_wnd;
1172 
1173 	if (__mptcp_check_fallback(msk))
1174 		return avail_size;
1175 
1176 	mptcp_snd_wnd = window_end - data_seq;
1177 	avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size);
1178 
1179 	if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
1180 		tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
1181 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
1182 	}
1183 
1184 	return avail_size;
1185 }
1186 
1187 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
1188 {
1189 	struct skb_ext *mpext = __skb_ext_alloc(gfp);
1190 
1191 	if (!mpext)
1192 		return false;
1193 	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
1194 	return true;
1195 }
1196 
1197 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
1198 {
1199 	struct sk_buff *skb;
1200 
1201 	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
1202 	if (likely(skb)) {
1203 		if (likely(__mptcp_add_ext(skb, gfp))) {
1204 			skb_reserve(skb, MAX_TCP_HEADER);
1205 			skb->ip_summed = CHECKSUM_PARTIAL;
1206 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
1207 			return skb;
1208 		}
1209 		__kfree_skb(skb);
1210 	} else {
1211 		mptcp_enter_memory_pressure(sk);
1212 	}
1213 	return NULL;
1214 }
1215 
1216 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1217 {
1218 	struct sk_buff *skb;
1219 
1220 	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
1221 	if (!skb)
1222 		return NULL;
1223 
1224 	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1225 		tcp_skb_entail(ssk, skb);
1226 		return skb;
1227 	}
1228 	tcp_skb_tsorted_anchor_cleanup(skb);
1229 	kfree_skb(skb);
1230 	return NULL;
1231 }
1232 
1233 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
1234 {
1235 	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
1236 
1237 	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
1238 }
1239 
1240 /* note: this always recompute the csum on the whole skb, even
1241  * if we just appended a single frag. More status info needed
1242  */
1243 static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
1244 {
1245 	struct mptcp_ext *mpext = mptcp_get_ext(skb);
1246 	__wsum csum = ~csum_unfold(mpext->csum);
1247 	int offset = skb->len - added;
1248 
1249 	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
1250 }
1251 
1252 static void mptcp_update_infinite_map(struct mptcp_sock *msk,
1253 				      struct sock *ssk,
1254 				      struct mptcp_ext *mpext)
1255 {
1256 	if (!mpext)
1257 		return;
1258 
1259 	mpext->infinite_map = 1;
1260 	mpext->data_len = 0;
1261 
1262 	if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
1263 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
1264 		mptcp_subflow_reset(ssk);
1265 		return;
1266 	}
1267 
1268 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
1269 }
1270 
1271 #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
1272 
1273 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1274 			      struct mptcp_data_frag *dfrag,
1275 			      struct mptcp_sendmsg_info *info)
1276 {
1277 	u64 data_seq = dfrag->data_seq + info->sent;
1278 	int offset = dfrag->offset + info->sent;
1279 	struct mptcp_sock *msk = mptcp_sk(sk);
1280 	bool zero_window_probe = false;
1281 	struct mptcp_ext *mpext = NULL;
1282 	bool can_coalesce = false;
1283 	bool reuse_skb = true;
1284 	struct sk_buff *skb;
1285 	size_t copy;
1286 	int i;
1287 
1288 	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
1289 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1290 
1291 	if (WARN_ON_ONCE(info->sent > info->limit ||
1292 			 info->limit > dfrag->data_len))
1293 		return 0;
1294 
1295 	if (unlikely(!__tcp_can_send(ssk)))
1296 		return -EAGAIN;
1297 
1298 	/* compute send limit */
1299 	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
1300 		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
1301 	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1302 	copy = info->size_goal;
1303 
1304 	skb = tcp_write_queue_tail(ssk);
1305 	if (skb && copy > skb->len) {
1306 		/* Limit the write to the size available in the
1307 		 * current skb, if any, so that we create at most a new skb.
1308 		 * Explicitly tells TCP internals to avoid collapsing on later
1309 		 * queue management operation, to avoid breaking the ext <->
1310 		 * SSN association set here
1311 		 */
1312 		mpext = mptcp_get_ext(skb);
1313 		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
1314 			TCP_SKB_CB(skb)->eor = 1;
1315 			tcp_mark_push(tcp_sk(ssk), skb);
1316 			goto alloc_skb;
1317 		}
1318 
1319 		i = skb_shinfo(skb)->nr_frags;
1320 		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
1321 		if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
1322 			tcp_mark_push(tcp_sk(ssk), skb);
1323 			goto alloc_skb;
1324 		}
1325 
1326 		copy -= skb->len;
1327 	} else {
1328 alloc_skb:
1329 		skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
1330 		if (!skb)
1331 			return -ENOMEM;
1332 
1333 		i = skb_shinfo(skb)->nr_frags;
1334 		reuse_skb = false;
1335 		mpext = mptcp_get_ext(skb);
1336 	}
1337 
1338 	/* Zero window and all data acked? Probe. */
1339 	copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
1340 	if (copy == 0) {
1341 		u64 snd_una = READ_ONCE(msk->snd_una);
1342 
1343 		/* No need for zero probe if there are any data pending
1344 		 * either at the msk or ssk level; skb is the current write
1345 		 * queue tail and can be empty at this point.
1346 		 */
1347 		if (snd_una != msk->snd_nxt || skb->len ||
1348 		    skb != tcp_send_head(ssk)) {
1349 			tcp_remove_empty_skb(ssk);
1350 			return 0;
1351 		}
1352 
1353 		zero_window_probe = true;
1354 		data_seq = snd_una - 1;
1355 		copy = 1;
1356 	}
1357 
1358 	copy = min_t(size_t, copy, info->limit - info->sent);
1359 	if (!sk_wmem_schedule(ssk, copy)) {
1360 		tcp_remove_empty_skb(ssk);
1361 		return -ENOMEM;
1362 	}
1363 
1364 	if (can_coalesce) {
1365 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1366 	} else {
1367 		get_page(dfrag->page);
1368 		skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
1369 	}
1370 
1371 	skb->len += copy;
1372 	skb->data_len += copy;
1373 	skb->truesize += copy;
1374 	sk_wmem_queued_add(ssk, copy);
1375 	sk_mem_charge(ssk, copy);
1376 	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
1377 	TCP_SKB_CB(skb)->end_seq += copy;
1378 	tcp_skb_pcount_set(skb, 0);
1379 
1380 	/* on skb reuse we just need to update the DSS len */
1381 	if (reuse_skb) {
1382 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1383 		mpext->data_len += copy;
1384 		goto out;
1385 	}
1386 
1387 	memset(mpext, 0, sizeof(*mpext));
1388 	mpext->data_seq = data_seq;
1389 	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1390 	mpext->data_len = copy;
1391 	mpext->use_map = 1;
1392 	mpext->dsn64 = 1;
1393 
1394 	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
1395 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1396 		 mpext->dsn64);
1397 
1398 	if (zero_window_probe) {
1399 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
1400 		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1401 		mpext->frozen = 1;
1402 		if (READ_ONCE(msk->csum_enabled))
1403 			mptcp_update_data_checksum(skb, copy);
1404 		tcp_push_pending_frames(ssk);
1405 		return 0;
1406 	}
1407 out:
1408 	if (READ_ONCE(msk->csum_enabled))
1409 		mptcp_update_data_checksum(skb, copy);
1410 	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
1411 		mptcp_update_infinite_map(msk, ssk, mpext);
1412 	trace_mptcp_sendmsg_frag(mpext);
1413 	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1414 	return copy;
1415 }
1416 
1417 #define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
1418 					 sizeof(struct tcphdr) - \
1419 					 MAX_TCP_OPTION_SPACE - \
1420 					 sizeof(struct ipv6hdr) - \
1421 					 sizeof(struct frag_hdr))
1422 
1423 struct subflow_send_info {
1424 	struct sock *ssk;
1425 	u64 linger_time;
1426 };
1427 
1428 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
1429 {
1430 	if (!subflow->stale)
1431 		return;
1432 
1433 	subflow->stale = 0;
1434 	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
1435 }
1436 
1437 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1438 {
1439 	if (unlikely(subflow->stale)) {
1440 		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
1441 
1442 		if (subflow->stale_rcv_tstamp == rcv_tstamp)
1443 			return false;
1444 
1445 		mptcp_subflow_set_active(subflow);
1446 	}
1447 	return __mptcp_subflow_active(subflow);
1448 }
1449 
1450 #define SSK_MODE_ACTIVE	0
1451 #define SSK_MODE_BACKUP	1
1452 #define SSK_MODE_MAX	2
1453 
1454 /* implement the mptcp packet scheduler;
1455  * returns the subflow that will transmit the next DSS
1456  * additionally updates the rtx timeout
1457  */
1458 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
1459 {
1460 	struct subflow_send_info send_info[SSK_MODE_MAX];
1461 	struct mptcp_subflow_context *subflow;
1462 	struct sock *sk = (struct sock *)msk;
1463 	u32 pace, burst, wmem;
1464 	int i, nr_active = 0;
1465 	struct sock *ssk;
1466 	u64 linger_time;
1467 	long tout = 0;
1468 
1469 	/* pick the subflow with the lower wmem/wspace ratio */
1470 	for (i = 0; i < SSK_MODE_MAX; ++i) {
1471 		send_info[i].ssk = NULL;
1472 		send_info[i].linger_time = -1;
1473 	}
1474 
1475 	mptcp_for_each_subflow(msk, subflow) {
1476 		bool backup = subflow->backup || subflow->request_bkup;
1477 
1478 		trace_mptcp_subflow_get_send(subflow);
1479 		ssk =  mptcp_subflow_tcp_sock(subflow);
1480 		if (!mptcp_subflow_active(subflow))
1481 			continue;
1482 
1483 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
1484 		nr_active += !backup;
1485 		pace = subflow->avg_pacing_rate;
1486 		if (unlikely(!pace)) {
1487 			/* init pacing rate from socket */
1488 			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
1489 			pace = subflow->avg_pacing_rate;
1490 			if (!pace)
1491 				continue;
1492 		}
1493 
1494 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
1495 		if (linger_time < send_info[backup].linger_time) {
1496 			send_info[backup].ssk = ssk;
1497 			send_info[backup].linger_time = linger_time;
1498 		}
1499 	}
1500 	__mptcp_set_timeout(sk, tout);
1501 
1502 	/* pick the best backup if no other subflow is active */
1503 	if (!nr_active)
1504 		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
1505 
1506 	/* According to the blest algorithm, to avoid HoL blocking for the
1507 	 * faster flow, we need to:
1508 	 * - estimate the faster flow linger time
1509 	 * - use the above to estimate the amount of byte transferred
1510 	 *   by the faster flow
1511 	 * - check that the amount of queued data is greater than the above,
1512 	 *   otherwise do not use the picked, slower, subflow
1513 	 * We select the subflow with the shorter estimated time to flush
1514 	 * the queued mem, which basically ensure the above. We just need
1515 	 * to check that subflow has a non empty cwin.
1516 	 */
1517 	ssk = send_info[SSK_MODE_ACTIVE].ssk;
1518 	if (!ssk || !sk_stream_memory_free(ssk))
1519 		return NULL;
1520 
1521 	burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
1522 	wmem = READ_ONCE(ssk->sk_wmem_queued);
1523 	if (!burst)
1524 		return ssk;
1525 
1526 	subflow = mptcp_subflow_ctx(ssk);
1527 	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
1528 					   READ_ONCE(ssk->sk_pacing_rate) * burst,
1529 					   burst + wmem);
1530 	msk->snd_burst = burst;
1531 	return ssk;
1532 }
1533 
1534 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
1535 {
1536 	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1537 	release_sock(ssk);
1538 }
1539 
1540 static void mptcp_update_post_push(struct mptcp_sock *msk,
1541 				   struct mptcp_data_frag *dfrag,
1542 				   u32 sent)
1543 {
1544 	u64 snd_nxt_new = dfrag->data_seq;
1545 
1546 	dfrag->already_sent += sent;
1547 
1548 	msk->snd_burst -= sent;
1549 
1550 	snd_nxt_new += dfrag->already_sent;
1551 
1552 	/* snd_nxt_new can be smaller than snd_nxt in case mptcp
1553 	 * is recovering after a failover. In that event, this re-sends
1554 	 * old segments.
1555 	 *
1556 	 * Thus compute snd_nxt_new candidate based on
1557 	 * the dfrag->data_seq that was sent and the data
1558 	 * that has been handed to the subflow for transmission
1559 	 * and skip update in case it was old dfrag.
1560 	 */
1561 	if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
1562 		msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
1563 		WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
1564 	}
1565 }
1566 
1567 void mptcp_check_and_set_pending(struct sock *sk)
1568 {
1569 	if (mptcp_send_head(sk)) {
1570 		mptcp_data_lock(sk);
1571 		mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
1572 		mptcp_data_unlock(sk);
1573 	}
1574 }
1575 
1576 static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
1577 				  struct mptcp_sendmsg_info *info)
1578 {
1579 	struct mptcp_sock *msk = mptcp_sk(sk);
1580 	struct mptcp_data_frag *dfrag;
1581 	int len, copied = 0, err = 0;
1582 
1583 	while ((dfrag = mptcp_send_head(sk))) {
1584 		info->sent = dfrag->already_sent;
1585 		info->limit = dfrag->data_len;
1586 		len = dfrag->data_len - dfrag->already_sent;
1587 		while (len > 0) {
1588 			int ret = 0;
1589 
1590 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
1591 			if (ret <= 0) {
1592 				err = copied ? : ret;
1593 				goto out;
1594 			}
1595 
1596 			info->sent += ret;
1597 			copied += ret;
1598 			len -= ret;
1599 
1600 			mptcp_update_post_push(msk, dfrag, ret);
1601 		}
1602 		msk->first_pending = mptcp_send_next(sk);
1603 
1604 		if (msk->snd_burst <= 0 ||
1605 		    !sk_stream_memory_free(ssk) ||
1606 		    !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
1607 			err = copied;
1608 			goto out;
1609 		}
1610 		mptcp_set_timeout(sk);
1611 	}
1612 	err = copied;
1613 
1614 out:
1615 	if (err > 0)
1616 		msk->last_data_sent = tcp_jiffies32;
1617 	return err;
1618 }
1619 
1620 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
1621 {
1622 	struct sock *prev_ssk = NULL, *ssk = NULL;
1623 	struct mptcp_sock *msk = mptcp_sk(sk);
1624 	struct mptcp_sendmsg_info info = {
1625 				.flags = flags,
1626 	};
1627 	bool copied = false;
1628 	int push_count = 1;
1629 
1630 	while (mptcp_send_head(sk) && (push_count > 0)) {
1631 		struct mptcp_subflow_context *subflow;
1632 		int ret = 0;
1633 
1634 		if (mptcp_sched_get_send(msk))
1635 			break;
1636 
1637 		push_count = 0;
1638 
1639 		mptcp_for_each_subflow(msk, subflow) {
1640 			if (READ_ONCE(subflow->scheduled)) {
1641 				mptcp_subflow_set_scheduled(subflow, false);
1642 
1643 				prev_ssk = ssk;
1644 				ssk = mptcp_subflow_tcp_sock(subflow);
1645 				if (ssk != prev_ssk) {
1646 					/* First check. If the ssk has changed since
1647 					 * the last round, release prev_ssk
1648 					 */
1649 					if (prev_ssk)
1650 						mptcp_push_release(prev_ssk, &info);
1651 
1652 					/* Need to lock the new subflow only if different
1653 					 * from the previous one, otherwise we are still
1654 					 * helding the relevant lock
1655 					 */
1656 					lock_sock(ssk);
1657 				}
1658 
1659 				push_count++;
1660 
1661 				ret = __subflow_push_pending(sk, ssk, &info);
1662 				if (ret <= 0) {
1663 					if (ret != -EAGAIN ||
1664 					    (1 << ssk->sk_state) &
1665 					     (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE))
1666 						push_count--;
1667 					continue;
1668 				}
1669 				copied = true;
1670 			}
1671 		}
1672 	}
1673 
1674 	/* at this point we held the socket lock for the last subflow we used */
1675 	if (ssk)
1676 		mptcp_push_release(ssk, &info);
1677 
1678 	/* Avoid scheduling the rtx timer if no data has been pushed; the timer
1679 	 * will be updated on positive acks by __mptcp_cleanup_una().
1680 	 */
1681 	if (copied) {
1682 		if (!mptcp_rtx_timer_pending(sk))
1683 			mptcp_reset_rtx_timer(sk);
1684 		mptcp_check_send_data_fin(sk);
1685 	}
1686 }
1687 
1688 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
1689 {
1690 	struct mptcp_sock *msk = mptcp_sk(sk);
1691 	struct mptcp_sendmsg_info info = {
1692 		.data_lock_held = true,
1693 	};
1694 	bool keep_pushing = true;
1695 	struct sock *xmit_ssk;
1696 	int copied = 0;
1697 
1698 	info.flags = 0;
1699 	while (mptcp_send_head(sk) && keep_pushing) {
1700 		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1701 		int ret = 0;
1702 
1703 		/* check for a different subflow usage only after
1704 		 * spooling the first chunk of data
1705 		 */
1706 		if (first) {
1707 			mptcp_subflow_set_scheduled(subflow, false);
1708 			ret = __subflow_push_pending(sk, ssk, &info);
1709 			first = false;
1710 			if (ret <= 0)
1711 				break;
1712 			copied += ret;
1713 			continue;
1714 		}
1715 
1716 		if (mptcp_sched_get_send(msk))
1717 			goto out;
1718 
1719 		if (READ_ONCE(subflow->scheduled)) {
1720 			mptcp_subflow_set_scheduled(subflow, false);
1721 			ret = __subflow_push_pending(sk, ssk, &info);
1722 			if (ret <= 0)
1723 				keep_pushing = false;
1724 			copied += ret;
1725 		}
1726 
1727 		mptcp_for_each_subflow(msk, subflow) {
1728 			if (READ_ONCE(subflow->scheduled)) {
1729 				xmit_ssk = mptcp_subflow_tcp_sock(subflow);
1730 				if (xmit_ssk != ssk) {
1731 					mptcp_subflow_delegate(subflow,
1732 							       MPTCP_DELEGATE_SEND);
1733 					keep_pushing = false;
1734 				}
1735 			}
1736 		}
1737 	}
1738 
1739 out:
1740 	/* __mptcp_alloc_tx_skb could have released some wmem and we are
1741 	 * not going to flush it via release_sock()
1742 	 */
1743 	if (copied) {
1744 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1745 			 info.size_goal);
1746 		if (!mptcp_rtx_timer_pending(sk))
1747 			mptcp_reset_rtx_timer(sk);
1748 
1749 		if (msk->snd_data_fin_enable &&
1750 		    msk->snd_nxt + 1 == msk->write_seq)
1751 			mptcp_schedule_work(sk);
1752 	}
1753 }
1754 
1755 static int mptcp_disconnect(struct sock *sk, int flags);
1756 
1757 static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1758 				  size_t len, int *copied_syn)
1759 {
1760 	unsigned int saved_flags = msg->msg_flags;
1761 	struct mptcp_sock *msk = mptcp_sk(sk);
1762 	struct sock *ssk;
1763 	int ret;
1764 
1765 	/* on flags based fastopen the mptcp is supposed to create the
1766 	 * first subflow right now. Otherwise we are in the defer_connect
1767 	 * path, and the first subflow must be already present.
1768 	 * Since the defer_connect flag is cleared after the first succsful
1769 	 * fastopen attempt, no need to check for additional subflow status.
1770 	 */
1771 	if (msg->msg_flags & MSG_FASTOPEN) {
1772 		ssk = __mptcp_nmpc_sk(msk);
1773 		if (IS_ERR(ssk))
1774 			return PTR_ERR(ssk);
1775 	}
1776 	if (!msk->first)
1777 		return -EINVAL;
1778 
1779 	ssk = msk->first;
1780 
1781 	lock_sock(ssk);
1782 	msg->msg_flags |= MSG_DONTWAIT;
1783 	msk->fastopening = 1;
1784 	ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
1785 	msk->fastopening = 0;
1786 	msg->msg_flags = saved_flags;
1787 	release_sock(ssk);
1788 
1789 	/* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1790 	if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
1791 		ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1792 					    msg->msg_namelen, msg->msg_flags, 1);
1793 
1794 		/* Keep the same behaviour of plain TCP: zero the copied bytes in
1795 		 * case of any error, except timeout or signal
1796 		 */
1797 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
1798 			*copied_syn = 0;
1799 	} else if (ret && ret != -EINPROGRESS) {
1800 		/* The disconnect() op called by tcp_sendmsg_fastopen()/
1801 		 * __inet_stream_connect() can fail, due to looking check,
1802 		 * see mptcp_disconnect().
1803 		 * Attempt it again outside the problematic scope.
1804 		 */
1805 		if (!mptcp_disconnect(sk, 0)) {
1806 			sk->sk_disconnects++;
1807 			sk->sk_socket->state = SS_UNCONNECTED;
1808 		}
1809 	}
1810 	inet_clear_bit(DEFER_CONNECT, sk);
1811 
1812 	return ret;
1813 }
1814 
1815 static int do_copy_data_nocache(struct sock *sk, int copy,
1816 				struct iov_iter *from, char *to)
1817 {
1818 	if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1819 		if (!copy_from_iter_full_nocache(to, copy, from))
1820 			return -EFAULT;
1821 	} else if (!copy_from_iter_full(to, copy, from)) {
1822 		return -EFAULT;
1823 	}
1824 	return 0;
1825 }
1826 
1827 /* open-code sk_stream_memory_free() plus sent limit computation to
1828  * avoid indirect calls in fast-path.
1829  * Called under the msk socket lock, so we can avoid a bunch of ONCE
1830  * annotations.
1831  */
1832 static u32 mptcp_send_limit(const struct sock *sk)
1833 {
1834 	const struct mptcp_sock *msk = mptcp_sk(sk);
1835 	u32 limit, not_sent;
1836 
1837 	if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
1838 		return 0;
1839 
1840 	limit = mptcp_notsent_lowat(sk);
1841 	if (limit == UINT_MAX)
1842 		return UINT_MAX;
1843 
1844 	not_sent = msk->write_seq - msk->snd_nxt;
1845 	if (not_sent >= limit)
1846 		return 0;
1847 
1848 	return limit - not_sent;
1849 }
1850 
1851 static void mptcp_rps_record_subflows(const struct mptcp_sock *msk)
1852 {
1853 	struct mptcp_subflow_context *subflow;
1854 
1855 	if (!rfs_is_needed())
1856 		return;
1857 
1858 	mptcp_for_each_subflow(msk, subflow) {
1859 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1860 
1861 		sock_rps_record_flow(ssk);
1862 	}
1863 }
1864 
1865 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1866 {
1867 	struct mptcp_sock *msk = mptcp_sk(sk);
1868 	struct page_frag *pfrag;
1869 	size_t copied = 0;
1870 	int ret = 0;
1871 	long timeo;
1872 
1873 	/* silently ignore everything else */
1874 	msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_FASTOPEN;
1875 
1876 	lock_sock(sk);
1877 
1878 	mptcp_rps_record_subflows(msk);
1879 
1880 	if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
1881 		     msg->msg_flags & MSG_FASTOPEN)) {
1882 		int copied_syn = 0;
1883 
1884 		ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn);
1885 		copied += copied_syn;
1886 		if (ret == -EINPROGRESS && copied_syn > 0)
1887 			goto out;
1888 		else if (ret)
1889 			goto do_error;
1890 	}
1891 
1892 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1893 
1894 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1895 		ret = sk_stream_wait_connect(sk, &timeo);
1896 		if (ret)
1897 			goto do_error;
1898 	}
1899 
1900 	ret = -EPIPE;
1901 	if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)))
1902 		goto do_error;
1903 
1904 	pfrag = sk_page_frag(sk);
1905 
1906 	while (msg_data_left(msg)) {
1907 		int total_ts, frag_truesize = 0;
1908 		struct mptcp_data_frag *dfrag;
1909 		bool dfrag_collapsed;
1910 		size_t psize, offset;
1911 		u32 copy_limit;
1912 
1913 		/* ensure fitting the notsent_lowat() constraint */
1914 		copy_limit = mptcp_send_limit(sk);
1915 		if (!copy_limit)
1916 			goto wait_for_memory;
1917 
1918 		/* reuse tail pfrag, if possible, or carve a new one from the
1919 		 * page allocator
1920 		 */
1921 		dfrag = mptcp_pending_tail(sk);
1922 		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1923 		if (!dfrag_collapsed) {
1924 			if (!mptcp_page_frag_refill(sk, pfrag))
1925 				goto wait_for_memory;
1926 
1927 			dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1928 			frag_truesize = dfrag->overhead;
1929 		}
1930 
1931 		/* we do not bound vs wspace, to allow a single packet.
1932 		 * memory accounting will prevent execessive memory usage
1933 		 * anyway
1934 		 */
1935 		offset = dfrag->offset + dfrag->data_len;
1936 		psize = pfrag->size - offset;
1937 		psize = min_t(size_t, psize, msg_data_left(msg));
1938 		psize = min_t(size_t, psize, copy_limit);
1939 		total_ts = psize + frag_truesize;
1940 
1941 		if (!sk_wmem_schedule(sk, total_ts))
1942 			goto wait_for_memory;
1943 
1944 		ret = do_copy_data_nocache(sk, psize, &msg->msg_iter,
1945 					   page_address(dfrag->page) + offset);
1946 		if (ret)
1947 			goto do_error;
1948 
1949 		/* data successfully copied into the write queue */
1950 		sk_forward_alloc_add(sk, -total_ts);
1951 		copied += psize;
1952 		dfrag->data_len += psize;
1953 		frag_truesize += psize;
1954 		pfrag->offset += frag_truesize;
1955 		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1956 
1957 		/* charge data on mptcp pending queue to the msk socket
1958 		 * Note: we charge such data both to sk and ssk
1959 		 */
1960 		sk_wmem_queued_add(sk, frag_truesize);
1961 		if (!dfrag_collapsed) {
1962 			get_page(dfrag->page);
1963 			list_add_tail(&dfrag->list, &msk->rtx_queue);
1964 			if (!msk->first_pending)
1965 				msk->first_pending = dfrag;
1966 		}
1967 		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
1968 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
1969 			 !dfrag_collapsed);
1970 
1971 		continue;
1972 
1973 wait_for_memory:
1974 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1975 		__mptcp_push_pending(sk, msg->msg_flags);
1976 		ret = sk_stream_wait_memory(sk, &timeo);
1977 		if (ret)
1978 			goto do_error;
1979 	}
1980 
1981 	if (copied)
1982 		__mptcp_push_pending(sk, msg->msg_flags);
1983 
1984 out:
1985 	release_sock(sk);
1986 	return copied;
1987 
1988 do_error:
1989 	if (copied)
1990 		goto out;
1991 
1992 	copied = sk_stream_error(sk, msg->msg_flags, ret);
1993 	goto out;
1994 }
1995 
1996 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
1997 
1998 static void mptcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
1999 {
2000 	/* avoid the indirect call, we know the destructor is sock_rfree */
2001 	skb->destructor = NULL;
2002 	skb->sk = NULL;
2003 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2004 	sk_mem_uncharge(sk, skb->truesize);
2005 	__skb_unlink(skb, &sk->sk_receive_queue);
2006 	skb_attempt_defer_free(skb);
2007 }
2008 
2009 static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
2010 				size_t len, int flags, int copied_total,
2011 				struct scm_timestamping_internal *tss,
2012 				int *cmsg_flags)
2013 {
2014 	struct mptcp_sock *msk = mptcp_sk(sk);
2015 	struct sk_buff *skb, *tmp;
2016 	int total_data_len = 0;
2017 	int copied = 0;
2018 
2019 	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
2020 		u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
2021 		u32 data_len = skb->len - offset;
2022 		u32 count;
2023 		int err;
2024 
2025 		if (flags & MSG_PEEK) {
2026 			/* skip already peeked skbs */
2027 			if (total_data_len + data_len <= copied_total) {
2028 				total_data_len += data_len;
2029 				continue;
2030 			}
2031 
2032 			/* skip the already peeked data in the current skb */
2033 			delta = copied_total - total_data_len;
2034 			offset += delta;
2035 			data_len -= delta;
2036 		}
2037 
2038 		count = min_t(size_t, len - copied, data_len);
2039 		if (!(flags & MSG_TRUNC)) {
2040 			err = skb_copy_datagram_msg(skb, offset, msg, count);
2041 			if (unlikely(err < 0)) {
2042 				if (!copied)
2043 					return err;
2044 				break;
2045 			}
2046 		}
2047 
2048 		if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
2049 			tcp_update_recv_tstamps(skb, tss);
2050 			*cmsg_flags |= MPTCP_CMSG_TS;
2051 		}
2052 
2053 		copied += count;
2054 
2055 		if (!(flags & MSG_PEEK)) {
2056 			msk->bytes_consumed += count;
2057 			if (count < data_len) {
2058 				MPTCP_SKB_CB(skb)->offset += count;
2059 				MPTCP_SKB_CB(skb)->map_seq += count;
2060 				break;
2061 			}
2062 
2063 			mptcp_eat_recv_skb(sk, skb);
2064 		}
2065 
2066 		if (copied >= len)
2067 			break;
2068 	}
2069 
2070 	mptcp_rcv_space_adjust(msk, copied);
2071 	return copied;
2072 }
2073 
2074 /* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
2075  *
2076  * Only difference: Use highest rtt estimate of the subflows in use.
2077  */
2078 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
2079 {
2080 	struct mptcp_subflow_context *subflow;
2081 	struct sock *sk = (struct sock *)msk;
2082 	u8 scaling_ratio = U8_MAX;
2083 	u32 time, advmss = 1;
2084 	u64 rtt_us, mstamp;
2085 
2086 	msk_owned_by_me(msk);
2087 
2088 	if (copied <= 0)
2089 		return;
2090 
2091 	if (!msk->rcvspace_init)
2092 		mptcp_rcv_space_init(msk, msk->first);
2093 
2094 	msk->rcvq_space.copied += copied;
2095 
2096 	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
2097 	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
2098 
2099 	rtt_us = msk->rcvq_space.rtt_us;
2100 	if (rtt_us && time < (rtt_us >> 3))
2101 		return;
2102 
2103 	rtt_us = 0;
2104 	mptcp_for_each_subflow(msk, subflow) {
2105 		const struct tcp_sock *tp;
2106 		u64 sf_rtt_us;
2107 		u32 sf_advmss;
2108 
2109 		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
2110 
2111 		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
2112 		sf_advmss = READ_ONCE(tp->advmss);
2113 
2114 		rtt_us = max(sf_rtt_us, rtt_us);
2115 		advmss = max(sf_advmss, advmss);
2116 		scaling_ratio = min(tp->scaling_ratio, scaling_ratio);
2117 	}
2118 
2119 	msk->rcvq_space.rtt_us = rtt_us;
2120 	msk->scaling_ratio = scaling_ratio;
2121 	if (time < (rtt_us >> 3) || rtt_us == 0)
2122 		return;
2123 
2124 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
2125 		goto new_measure;
2126 
2127 	if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
2128 		/* Make subflows follow along.  If we do not do this, we
2129 		 * get drops at subflow level if skbs can't be moved to
2130 		 * the mptcp rx queue fast enough (announced rcv_win can
2131 		 * exceed ssk->sk_rcvbuf).
2132 		 */
2133 		mptcp_for_each_subflow(msk, subflow) {
2134 			struct sock *ssk;
2135 			bool slow;
2136 
2137 			ssk = mptcp_subflow_tcp_sock(subflow);
2138 			slow = lock_sock_fast(ssk);
2139 			/* subflows can be added before tcp_init_transfer() */
2140 			if (tcp_sk(ssk)->rcvq_space.space)
2141 				tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
2142 			unlock_sock_fast(ssk, slow);
2143 		}
2144 	}
2145 
2146 new_measure:
2147 	msk->rcvq_space.copied = 0;
2148 	msk->rcvq_space.time = mstamp;
2149 }
2150 
2151 static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
2152 {
2153 	struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
2154 	struct mptcp_sock *msk = mptcp_sk(sk);
2155 	bool moved = false;
2156 
2157 	*delta = 0;
2158 	while (1) {
2159 		/* If the msk recvbuf is full stop, don't drop */
2160 		if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2161 			break;
2162 
2163 		prefetch(skb->next);
2164 		list_del(&skb->list);
2165 		*delta += skb->truesize;
2166 
2167 		moved |= __mptcp_move_skb(sk, skb);
2168 		if (list_empty(skbs))
2169 			break;
2170 
2171 		skb = list_first_entry(skbs, struct sk_buff, list);
2172 	}
2173 
2174 	__mptcp_ofo_queue(msk);
2175 	if (moved)
2176 		mptcp_check_data_fin((struct sock *)msk);
2177 	return moved;
2178 }
2179 
2180 static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
2181 {
2182 	struct mptcp_sock *msk = mptcp_sk(sk);
2183 
2184 	/* After CG initialization, subflows should never add skb before
2185 	 * gaining the CG themself.
2186 	 */
2187 	DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
2188 			       mem_cgroup_from_sk(sk));
2189 
2190 	/* Don't spool the backlog if the rcvbuf is full. */
2191 	if (list_empty(&msk->backlog_list) ||
2192 	    sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2193 		return false;
2194 
2195 	INIT_LIST_HEAD(skbs);
2196 	list_splice_init(&msk->backlog_list, skbs);
2197 	return true;
2198 }
2199 
2200 static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
2201 				  struct list_head *skbs)
2202 {
2203 	struct mptcp_sock *msk = mptcp_sk(sk);
2204 
2205 	WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
2206 	list_splice(skbs, &msk->backlog_list);
2207 }
2208 
2209 static bool mptcp_move_skbs(struct sock *sk)
2210 {
2211 	struct list_head skbs;
2212 	bool enqueued = false;
2213 	u32 moved;
2214 
2215 	mptcp_data_lock(sk);
2216 	while (mptcp_can_spool_backlog(sk, &skbs)) {
2217 		mptcp_data_unlock(sk);
2218 		enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
2219 
2220 		mptcp_data_lock(sk);
2221 		mptcp_backlog_spooled(sk, moved, &skbs);
2222 	}
2223 	mptcp_data_unlock(sk);
2224 	return enqueued;
2225 }
2226 
2227 static unsigned int mptcp_inq_hint(const struct sock *sk)
2228 {
2229 	const struct mptcp_sock *msk = mptcp_sk(sk);
2230 	const struct sk_buff *skb;
2231 
2232 	skb = skb_peek(&sk->sk_receive_queue);
2233 	if (skb) {
2234 		u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
2235 
2236 		if (hint_val >= INT_MAX)
2237 			return INT_MAX;
2238 
2239 		return (unsigned int)hint_val;
2240 	}
2241 
2242 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
2243 		return 1;
2244 
2245 	return 0;
2246 }
2247 
2248 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2249 			 int flags, int *addr_len)
2250 {
2251 	struct mptcp_sock *msk = mptcp_sk(sk);
2252 	struct scm_timestamping_internal tss;
2253 	int copied = 0, cmsg_flags = 0;
2254 	int target;
2255 	long timeo;
2256 
2257 	/* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2258 	if (unlikely(flags & MSG_ERRQUEUE))
2259 		return inet_recv_error(sk, msg, len, addr_len);
2260 
2261 	lock_sock(sk);
2262 	if (unlikely(sk->sk_state == TCP_LISTEN)) {
2263 		copied = -ENOTCONN;
2264 		goto out_err;
2265 	}
2266 
2267 	mptcp_rps_record_subflows(msk);
2268 
2269 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2270 
2271 	len = min_t(size_t, len, INT_MAX);
2272 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2273 
2274 	if (unlikely(msk->recvmsg_inq))
2275 		cmsg_flags = MPTCP_CMSG_INQ;
2276 
2277 	while (copied < len) {
2278 		int err, bytes_read;
2279 
2280 		bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
2281 						  copied, &tss, &cmsg_flags);
2282 		if (unlikely(bytes_read < 0)) {
2283 			if (!copied)
2284 				copied = bytes_read;
2285 			goto out_err;
2286 		}
2287 
2288 		copied += bytes_read;
2289 
2290 		if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
2291 			continue;
2292 
2293 		/* only the MPTCP socket status is relevant here. The exit
2294 		 * conditions mirror closely tcp_recvmsg()
2295 		 */
2296 		if (copied >= target)
2297 			break;
2298 
2299 		if (copied) {
2300 			if (sk->sk_err ||
2301 			    sk->sk_state == TCP_CLOSE ||
2302 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2303 			    !timeo ||
2304 			    signal_pending(current))
2305 				break;
2306 		} else {
2307 			if (sk->sk_err) {
2308 				copied = sock_error(sk);
2309 				break;
2310 			}
2311 
2312 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2313 				break;
2314 
2315 			if (sk->sk_state == TCP_CLOSE) {
2316 				copied = -ENOTCONN;
2317 				break;
2318 			}
2319 
2320 			if (!timeo) {
2321 				copied = -EAGAIN;
2322 				break;
2323 			}
2324 
2325 			if (signal_pending(current)) {
2326 				copied = sock_intr_errno(timeo);
2327 				break;
2328 			}
2329 		}
2330 
2331 		pr_debug("block timeout %ld\n", timeo);
2332 		mptcp_cleanup_rbuf(msk, copied);
2333 		err = sk_wait_data(sk, &timeo, NULL);
2334 		if (err < 0) {
2335 			err = copied ? : err;
2336 			goto out_err;
2337 		}
2338 	}
2339 
2340 	mptcp_cleanup_rbuf(msk, copied);
2341 
2342 out_err:
2343 	if (cmsg_flags && copied >= 0) {
2344 		if (cmsg_flags & MPTCP_CMSG_TS)
2345 			tcp_recv_timestamp(msg, sk, &tss);
2346 
2347 		if (cmsg_flags & MPTCP_CMSG_INQ) {
2348 			unsigned int inq = mptcp_inq_hint(sk);
2349 
2350 			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2351 		}
2352 	}
2353 
2354 	pr_debug("msk=%p rx queue empty=%d copied=%d\n",
2355 		 msk, skb_queue_empty(&sk->sk_receive_queue), copied);
2356 
2357 	release_sock(sk);
2358 	return copied;
2359 }
2360 
2361 static void mptcp_retransmit_timer(struct timer_list *t)
2362 {
2363 	struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer);
2364 	struct mptcp_sock *msk = mptcp_sk(sk);
2365 
2366 	bh_lock_sock(sk);
2367 	if (!sock_owned_by_user(sk)) {
2368 		/* we need a process context to retransmit */
2369 		if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2370 			mptcp_schedule_work(sk);
2371 	} else {
2372 		/* delegate our work to tcp_release_cb() */
2373 		__set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
2374 	}
2375 	bh_unlock_sock(sk);
2376 	sock_put(sk);
2377 }
2378 
2379 static void mptcp_tout_timer(struct timer_list *t)
2380 {
2381 	struct inet_connection_sock *icsk =
2382 		timer_container_of(icsk, t, mptcp_tout_timer);
2383 	struct sock *sk = &icsk->icsk_inet.sk;
2384 
2385 	mptcp_schedule_work(sk);
2386 	sock_put(sk);
2387 }
2388 
2389 /* Find an idle subflow.  Return NULL if there is unacked data at tcp
2390  * level.
2391  *
2392  * A backup subflow is returned only if that is the only kind available.
2393  */
2394 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
2395 {
2396 	struct sock *backup = NULL, *pick = NULL;
2397 	struct mptcp_subflow_context *subflow;
2398 	int min_stale_count = INT_MAX;
2399 
2400 	mptcp_for_each_subflow(msk, subflow) {
2401 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2402 
2403 		if (!__mptcp_subflow_active(subflow))
2404 			continue;
2405 
2406 		/* still data outstanding at TCP level? skip this */
2407 		if (!tcp_rtx_and_write_queues_empty(ssk)) {
2408 			mptcp_pm_subflow_chk_stale(msk, ssk);
2409 			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2410 			continue;
2411 		}
2412 
2413 		if (subflow->backup || subflow->request_bkup) {
2414 			if (!backup)
2415 				backup = ssk;
2416 			continue;
2417 		}
2418 
2419 		if (!pick)
2420 			pick = ssk;
2421 	}
2422 
2423 	if (pick)
2424 		return pick;
2425 
2426 	/* use backup only if there are no progresses anywhere */
2427 	return min_stale_count > 1 ? backup : NULL;
2428 }
2429 
2430 bool __mptcp_retransmit_pending_data(struct sock *sk)
2431 {
2432 	struct mptcp_data_frag *cur, *rtx_head;
2433 	struct mptcp_sock *msk = mptcp_sk(sk);
2434 
2435 	if (__mptcp_check_fallback(msk))
2436 		return false;
2437 
2438 	/* the closing socket has some data untransmitted and/or unacked:
2439 	 * some data in the mptcp rtx queue has not really xmitted yet.
2440 	 * keep it simple and re-inject the whole mptcp level rtx queue
2441 	 */
2442 	mptcp_data_lock(sk);
2443 	__mptcp_clean_una_wakeup(sk);
2444 	rtx_head = mptcp_rtx_head(sk);
2445 	if (!rtx_head) {
2446 		mptcp_data_unlock(sk);
2447 		return false;
2448 	}
2449 
2450 	msk->recovery_snd_nxt = msk->snd_nxt;
2451 	msk->recovery = true;
2452 	mptcp_data_unlock(sk);
2453 
2454 	msk->first_pending = rtx_head;
2455 	msk->snd_burst = 0;
2456 
2457 	/* be sure to clear the "sent status" on all re-injected fragments */
2458 	list_for_each_entry(cur, &msk->rtx_queue, list) {
2459 		if (!cur->already_sent)
2460 			break;
2461 		cur->already_sent = 0;
2462 	}
2463 
2464 	return true;
2465 }
2466 
2467 /* flags for __mptcp_close_ssk() */
2468 #define MPTCP_CF_PUSH		BIT(1)
2469 
2470 /* be sure to send a reset only if the caller asked for it, also
2471  * clean completely the subflow status when the subflow reaches
2472  * TCP_CLOSE state
2473  */
2474 static void __mptcp_subflow_disconnect(struct sock *ssk,
2475 				       struct mptcp_subflow_context *subflow,
2476 				       bool fastclosing)
2477 {
2478 	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
2479 	    fastclosing) {
2480 		/* The MPTCP code never wait on the subflow sockets, TCP-level
2481 		 * disconnect should never fail
2482 		 */
2483 		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
2484 		mptcp_subflow_ctx_reset(subflow);
2485 	} else {
2486 		tcp_shutdown(ssk, SEND_SHUTDOWN);
2487 	}
2488 }
2489 
2490 /* subflow sockets can be either outgoing (connect) or incoming
2491  * (accept).
2492  *
2493  * Outgoing subflows use in-kernel sockets.
2494  * Incoming subflows do not have their own 'struct socket' allocated,
2495  * so we need to use tcp_close() after detaching them from the mptcp
2496  * parent socket.
2497  */
2498 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2499 			      struct mptcp_subflow_context *subflow,
2500 			      unsigned int flags)
2501 {
2502 	struct mptcp_sock *msk = mptcp_sk(sk);
2503 	bool dispose_it, need_push = false;
2504 	int fwd_remaining;
2505 
2506 	/* Do not pass RX data to the msk, even if the subflow socket is not
2507 	 * going to be freed (i.e. even for the first subflow on graceful
2508 	 * subflow close.
2509 	 */
2510 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2511 	subflow->closing = 1;
2512 
2513 	/* Borrow the fwd allocated page left-over; fwd memory for the subflow
2514 	 * could be negative at this point, but will be reach zero soon - when
2515 	 * the data allocated using such fragment will be freed.
2516 	 */
2517 	if (subflow->lent_mem_frag) {
2518 		fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
2519 		sk_forward_alloc_add(sk, fwd_remaining);
2520 		sk_forward_alloc_add(ssk, -fwd_remaining);
2521 		subflow->lent_mem_frag = 0;
2522 	}
2523 
2524 	/* If the first subflow moved to a close state before accept, e.g. due
2525 	 * to an incoming reset or listener shutdown, the subflow socket is
2526 	 * already deleted by inet_child_forget() and the mptcp socket can't
2527 	 * survive too.
2528 	 */
2529 	if (msk->in_accept_queue && msk->first == ssk &&
2530 	    (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
2531 		/* ensure later check in mptcp_worker() will dispose the msk */
2532 		sock_set_flag(sk, SOCK_DEAD);
2533 		mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
2534 		mptcp_subflow_drop_ctx(ssk);
2535 		goto out_release;
2536 	}
2537 
2538 	dispose_it = msk->free_first || ssk != msk->first;
2539 	if (dispose_it)
2540 		list_del(&subflow->node);
2541 
2542 	if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
2543 		tcp_set_state(ssk, TCP_CLOSE);
2544 
2545 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
2546 	if (!dispose_it) {
2547 		__mptcp_subflow_disconnect(ssk, subflow, msk->fastclosing);
2548 		release_sock(ssk);
2549 
2550 		goto out;
2551 	}
2552 
2553 	subflow->disposable = 1;
2554 
2555 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
2556 	 * the ssk has been already destroyed, we just need to release the
2557 	 * reference owned by msk;
2558 	 */
2559 	if (!inet_csk(ssk)->icsk_ulp_ops) {
2560 		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
2561 		kfree_rcu(subflow, rcu);
2562 	} else {
2563 		/* otherwise tcp will dispose of the ssk and subflow ctx */
2564 		__tcp_close(ssk, 0);
2565 
2566 		/* close acquired an extra ref */
2567 		__sock_put(ssk);
2568 	}
2569 
2570 out_release:
2571 	__mptcp_subflow_error_report(sk, ssk);
2572 	release_sock(ssk);
2573 
2574 	sock_put(ssk);
2575 
2576 	if (ssk == msk->first)
2577 		WRITE_ONCE(msk->first, NULL);
2578 
2579 out:
2580 	__mptcp_sync_sndbuf(sk);
2581 	if (need_push)
2582 		__mptcp_push_pending(sk, 0);
2583 
2584 	/* Catch every 'all subflows closed' scenario, including peers silently
2585 	 * closing them, e.g. due to timeout.
2586 	 * For established sockets, allow an additional timeout before closing,
2587 	 * as the protocol can still create more subflows.
2588 	 */
2589 	if (list_is_singular(&msk->conn_list) && msk->first &&
2590 	    inet_sk_state_load(msk->first) == TCP_CLOSE) {
2591 		if (sk->sk_state != TCP_ESTABLISHED ||
2592 		    msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
2593 			mptcp_set_state(sk, TCP_CLOSE);
2594 			mptcp_close_wake_up(sk);
2595 		} else {
2596 			mptcp_start_tout_timer(sk);
2597 		}
2598 	}
2599 }
2600 
2601 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2602 		     struct mptcp_subflow_context *subflow)
2603 {
2604 	struct mptcp_sock *msk = mptcp_sk(sk);
2605 	struct sk_buff *skb;
2606 
2607 	/* The first subflow can already be closed or disconnected */
2608 	if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0)
2609 		return;
2610 
2611 	subflow->close_event_done = true;
2612 
2613 	if (sk->sk_state == TCP_ESTABLISHED)
2614 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2615 
2616 	/* Remove any reference from the backlog to this ssk; backlog skbs consume
2617 	 * space in the msk receive queue, no need to touch sk->sk_rmem_alloc
2618 	 */
2619 	list_for_each_entry(skb, &msk->backlog_list, list) {
2620 		if (skb->sk != ssk)
2621 			continue;
2622 
2623 		atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
2624 		skb->sk = NULL;
2625 	}
2626 
2627 	/* subflow aborted before reaching the fully_established status
2628 	 * attempt the creation of the next subflow
2629 	 */
2630 	mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);
2631 
2632 	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2633 }
2634 
2635 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2636 {
2637 	return 0;
2638 }
2639 
2640 static void __mptcp_close_subflow(struct sock *sk)
2641 {
2642 	struct mptcp_subflow_context *subflow, *tmp;
2643 	struct mptcp_sock *msk = mptcp_sk(sk);
2644 
2645 	might_sleep();
2646 
2647 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2648 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2649 		int ssk_state = inet_sk_state_load(ssk);
2650 
2651 		if (ssk_state != TCP_CLOSE &&
2652 		    (ssk_state != TCP_CLOSE_WAIT ||
2653 		     inet_sk_state_load(sk) != TCP_ESTABLISHED ||
2654 		     __mptcp_check_fallback(msk)))
2655 			continue;
2656 
2657 		/* 'subflow_data_ready' will re-sched once rx queue is empty */
2658 		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2659 			continue;
2660 
2661 		mptcp_close_ssk(sk, ssk, subflow);
2662 	}
2663 
2664 }
2665 
2666 static bool mptcp_close_tout_expired(const struct sock *sk)
2667 {
2668 	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
2669 	    sk->sk_state == TCP_CLOSE)
2670 		return false;
2671 
2672 	return time_after32(tcp_jiffies32,
2673 		  inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
2674 }
2675 
2676 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2677 {
2678 	struct mptcp_subflow_context *subflow, *tmp;
2679 	struct sock *sk = (struct sock *)msk;
2680 
2681 	if (likely(!READ_ONCE(msk->rcv_fastclose)))
2682 		return;
2683 
2684 	mptcp_token_destroy(msk);
2685 
2686 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2687 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2688 		bool slow;
2689 
2690 		slow = lock_sock_fast(tcp_sk);
2691 		if (tcp_sk->sk_state != TCP_CLOSE) {
2692 			mptcp_send_active_reset_reason(tcp_sk);
2693 			tcp_set_state(tcp_sk, TCP_CLOSE);
2694 		}
2695 		unlock_sock_fast(tcp_sk, slow);
2696 	}
2697 
2698 	/* Mirror the tcp_reset() error propagation */
2699 	switch (sk->sk_state) {
2700 	case TCP_SYN_SENT:
2701 		WRITE_ONCE(sk->sk_err, ECONNREFUSED);
2702 		break;
2703 	case TCP_CLOSE_WAIT:
2704 		WRITE_ONCE(sk->sk_err, EPIPE);
2705 		break;
2706 	case TCP_CLOSE:
2707 		return;
2708 	default:
2709 		WRITE_ONCE(sk->sk_err, ECONNRESET);
2710 	}
2711 
2712 	mptcp_set_state(sk, TCP_CLOSE);
2713 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
2714 	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2715 	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2716 
2717 	/* the calling mptcp_worker will properly destroy the socket */
2718 	if (sock_flag(sk, SOCK_DEAD))
2719 		return;
2720 
2721 	sk->sk_state_change(sk);
2722 	sk_error_report(sk);
2723 }
2724 
2725 static void __mptcp_retrans(struct sock *sk)
2726 {
2727 	struct mptcp_sendmsg_info info = { .data_lock_held = true, };
2728 	struct mptcp_sock *msk = mptcp_sk(sk);
2729 	struct mptcp_subflow_context *subflow;
2730 	struct mptcp_data_frag *dfrag;
2731 	struct sock *ssk;
2732 	int ret, err;
2733 	u16 len = 0;
2734 
2735 	mptcp_clean_una_wakeup(sk);
2736 
2737 	/* first check ssk: need to kick "stale" logic */
2738 	err = mptcp_sched_get_retrans(msk);
2739 	dfrag = mptcp_rtx_head(sk);
2740 	if (!dfrag) {
2741 		if (mptcp_data_fin_enabled(msk)) {
2742 			struct inet_connection_sock *icsk = inet_csk(sk);
2743 
2744 			WRITE_ONCE(icsk->icsk_retransmits,
2745 				   icsk->icsk_retransmits + 1);
2746 			mptcp_set_datafin_timeout(sk);
2747 			mptcp_send_ack(msk);
2748 
2749 			goto reset_timer;
2750 		}
2751 
2752 		if (!mptcp_send_head(sk))
2753 			goto clear_scheduled;
2754 
2755 		goto reset_timer;
2756 	}
2757 
2758 	if (err)
2759 		goto reset_timer;
2760 
2761 	mptcp_for_each_subflow(msk, subflow) {
2762 		if (READ_ONCE(subflow->scheduled)) {
2763 			u16 copied = 0;
2764 
2765 			mptcp_subflow_set_scheduled(subflow, false);
2766 
2767 			ssk = mptcp_subflow_tcp_sock(subflow);
2768 
2769 			lock_sock(ssk);
2770 
2771 			/* limit retransmission to the bytes already sent on some subflows */
2772 			info.sent = 0;
2773 			info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
2774 								    dfrag->already_sent;
2775 
2776 			/*
2777 			 * make the whole retrans decision, xmit, disallow
2778 			 * fallback atomic, note that we can't retrans even
2779 			 * when an infinite fallback is in progress, i.e. new
2780 			 * subflows are disallowed.
2781 			 */
2782 			spin_lock_bh(&msk->fallback_lock);
2783 			if (__mptcp_check_fallback(msk) ||
2784 			    !msk->allow_subflows) {
2785 				spin_unlock_bh(&msk->fallback_lock);
2786 				release_sock(ssk);
2787 				goto clear_scheduled;
2788 			}
2789 
2790 			while (info.sent < info.limit) {
2791 				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2792 				if (ret <= 0)
2793 					break;
2794 
2795 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2796 				copied += ret;
2797 				info.sent += ret;
2798 			}
2799 			if (copied) {
2800 				len = max(copied, len);
2801 				tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2802 					 info.size_goal);
2803 				msk->allow_infinite_fallback = false;
2804 			}
2805 			spin_unlock_bh(&msk->fallback_lock);
2806 
2807 			release_sock(ssk);
2808 		}
2809 	}
2810 
2811 	msk->bytes_retrans += len;
2812 	dfrag->already_sent = max(dfrag->already_sent, len);
2813 
2814 reset_timer:
2815 	mptcp_check_and_set_pending(sk);
2816 
2817 	if (!mptcp_rtx_timer_pending(sk))
2818 		mptcp_reset_rtx_timer(sk);
2819 
2820 clear_scheduled:
2821 	/* If no rtx data was available or in case of fallback, there
2822 	 * could be left-over scheduled subflows; clear them all
2823 	 * or later xmit could use bad ones
2824 	 */
2825 	mptcp_for_each_subflow(msk, subflow)
2826 		if (READ_ONCE(subflow->scheduled))
2827 			mptcp_subflow_set_scheduled(subflow, false);
2828 }
2829 
2830 /* schedule the timeout timer for the relevant event: either close timeout
2831  * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
2832  */
2833 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
2834 {
2835 	struct sock *sk = (struct sock *)msk;
2836 	unsigned long timeout, close_timeout;
2837 
2838 	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
2839 		return;
2840 
2841 	close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
2842 			tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);
2843 
2844 	/* the close timeout takes precedence on the fail one, and here at least one of
2845 	 * them is active
2846 	 */
2847 	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
2848 
2849 	sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout);
2850 }
2851 
2852 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
2853 {
2854 	struct sock *ssk = msk->first;
2855 	bool slow;
2856 
2857 	if (!ssk)
2858 		return;
2859 
2860 	pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
2861 
2862 	slow = lock_sock_fast(ssk);
2863 	mptcp_subflow_reset(ssk);
2864 	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
2865 	unlock_sock_fast(ssk, slow);
2866 }
2867 
2868 static void mptcp_backlog_purge(struct sock *sk)
2869 {
2870 	struct mptcp_sock *msk = mptcp_sk(sk);
2871 	struct sk_buff *tmp, *skb;
2872 	LIST_HEAD(backlog);
2873 
2874 	mptcp_data_lock(sk);
2875 	list_splice_init(&msk->backlog_list, &backlog);
2876 	msk->backlog_len = 0;
2877 	mptcp_data_unlock(sk);
2878 
2879 	list_for_each_entry_safe(skb, tmp, &backlog, list) {
2880 		mptcp_borrow_fwdmem(sk, skb);
2881 		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
2882 	}
2883 	sk_mem_reclaim(sk);
2884 }
2885 
2886 static void mptcp_do_fastclose(struct sock *sk)
2887 {
2888 	struct mptcp_subflow_context *subflow, *tmp;
2889 	struct mptcp_sock *msk = mptcp_sk(sk);
2890 
2891 	mptcp_set_state(sk, TCP_CLOSE);
2892 	mptcp_backlog_purge(sk);
2893 	msk->fastclosing = 1;
2894 
2895 	/* Explicitly send the fastclose reset as need */
2896 	if (__mptcp_check_fallback(msk))
2897 		return;
2898 
2899 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2900 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2901 
2902 		lock_sock(ssk);
2903 
2904 		/* Some subflow socket states don't allow/need a reset.*/
2905 		if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
2906 			goto unlock;
2907 
2908 		subflow->send_fastclose = 1;
2909 
2910 		/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2911 		 * issue in __tcp_select_window(), see tcp_disconnect().
2912 		 */
2913 		inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
2914 
2915 		tcp_send_active_reset(ssk, ssk->sk_allocation,
2916 				      SK_RST_REASON_TCP_ABORT_ON_CLOSE);
2917 unlock:
2918 		release_sock(ssk);
2919 	}
2920 }
2921 
2922 static void mptcp_worker(struct work_struct *work)
2923 {
2924 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2925 	struct sock *sk = (struct sock *)msk;
2926 	unsigned long fail_tout;
2927 	int state;
2928 
2929 	lock_sock(sk);
2930 	state = sk->sk_state;
2931 	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
2932 		goto unlock;
2933 
2934 	mptcp_check_fastclose(msk);
2935 
2936 	mptcp_pm_worker(msk);
2937 
2938 	mptcp_check_send_data_fin(sk);
2939 	mptcp_check_data_fin_ack(sk);
2940 	mptcp_check_data_fin(sk);
2941 
2942 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2943 		__mptcp_close_subflow(sk);
2944 
2945 	if (mptcp_close_tout_expired(sk)) {
2946 		struct mptcp_subflow_context *subflow, *tmp;
2947 
2948 		mptcp_do_fastclose(sk);
2949 		mptcp_for_each_subflow_safe(msk, subflow, tmp)
2950 			__mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
2951 		mptcp_close_wake_up(sk);
2952 	}
2953 
2954 	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
2955 		__mptcp_destroy_sock(sk);
2956 		goto unlock;
2957 	}
2958 
2959 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2960 		__mptcp_retrans(sk);
2961 
2962 	fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
2963 	if (fail_tout && time_after(jiffies, fail_tout))
2964 		mptcp_mp_fail_no_response(msk);
2965 
2966 unlock:
2967 	release_sock(sk);
2968 	sock_put(sk);
2969 }
2970 
2971 static void __mptcp_init_sock(struct sock *sk)
2972 {
2973 	struct mptcp_sock *msk = mptcp_sk(sk);
2974 
2975 	INIT_LIST_HEAD(&msk->conn_list);
2976 	INIT_LIST_HEAD(&msk->join_list);
2977 	INIT_LIST_HEAD(&msk->rtx_queue);
2978 	INIT_LIST_HEAD(&msk->backlog_list);
2979 	INIT_WORK(&msk->work, mptcp_worker);
2980 	msk->out_of_order_queue = RB_ROOT;
2981 	msk->first_pending = NULL;
2982 	msk->timer_ival = TCP_RTO_MIN;
2983 	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
2984 	msk->backlog_len = 0;
2985 
2986 	WRITE_ONCE(msk->first, NULL);
2987 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
2988 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
2989 	msk->allow_infinite_fallback = true;
2990 	msk->allow_subflows = true;
2991 	msk->recovery = false;
2992 	msk->subflow_id = 1;
2993 	msk->last_data_sent = tcp_jiffies32;
2994 	msk->last_data_recv = tcp_jiffies32;
2995 	msk->last_ack_recv = tcp_jiffies32;
2996 
2997 	mptcp_pm_data_init(msk);
2998 	spin_lock_init(&msk->fallback_lock);
2999 
3000 	/* re-use the csk retrans timer for MPTCP-level retrans */
3001 	timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0);
3002 	timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0);
3003 }
3004 
3005 static void mptcp_ca_reset(struct sock *sk)
3006 {
3007 	struct inet_connection_sock *icsk = inet_csk(sk);
3008 
3009 	tcp_assign_congestion_control(sk);
3010 	strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
3011 		sizeof(mptcp_sk(sk)->ca_name));
3012 
3013 	/* no need to keep a reference to the ops, the name will suffice */
3014 	tcp_cleanup_congestion_control(sk);
3015 	icsk->icsk_ca_ops = NULL;
3016 }
3017 
3018 static int mptcp_init_sock(struct sock *sk)
3019 {
3020 	struct net *net = sock_net(sk);
3021 	int ret;
3022 
3023 	__mptcp_init_sock(sk);
3024 
3025 	if (!mptcp_is_enabled(net))
3026 		return -ENOPROTOOPT;
3027 
3028 	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
3029 		return -ENOMEM;
3030 
3031 	rcu_read_lock();
3032 	ret = mptcp_init_sched(mptcp_sk(sk),
3033 			       mptcp_sched_find(mptcp_get_scheduler(net)));
3034 	rcu_read_unlock();
3035 	if (ret)
3036 		return ret;
3037 
3038 	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
3039 
3040 	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
3041 	 * propagate the correct value
3042 	 */
3043 	mptcp_ca_reset(sk);
3044 
3045 	sk_sockets_allocated_inc(sk);
3046 	sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
3047 	sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);
3048 
3049 	return 0;
3050 }
3051 
3052 static void __mptcp_clear_xmit(struct sock *sk)
3053 {
3054 	struct mptcp_sock *msk = mptcp_sk(sk);
3055 	struct mptcp_data_frag *dtmp, *dfrag;
3056 
3057 	msk->first_pending = NULL;
3058 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
3059 		dfrag_clear(sk, dfrag);
3060 }
3061 
3062 void mptcp_cancel_work(struct sock *sk)
3063 {
3064 	struct mptcp_sock *msk = mptcp_sk(sk);
3065 
3066 	if (cancel_work_sync(&msk->work))
3067 		__sock_put(sk);
3068 }
3069 
3070 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
3071 {
3072 	lock_sock(ssk);
3073 
3074 	switch (ssk->sk_state) {
3075 	case TCP_LISTEN:
3076 		if (!(how & RCV_SHUTDOWN))
3077 			break;
3078 		fallthrough;
3079 	case TCP_SYN_SENT:
3080 		WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
3081 		break;
3082 	default:
3083 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
3084 			pr_debug("Fallback\n");
3085 			ssk->sk_shutdown |= how;
3086 			tcp_shutdown(ssk, how);
3087 
3088 			/* simulate the data_fin ack reception to let the state
3089 			 * machine move forward
3090 			 */
3091 			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
3092 			mptcp_schedule_work(sk);
3093 		} else {
3094 			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
3095 			tcp_send_ack(ssk);
3096 			if (!mptcp_rtx_timer_pending(sk))
3097 				mptcp_reset_rtx_timer(sk);
3098 		}
3099 		break;
3100 	}
3101 
3102 	release_sock(ssk);
3103 }
3104 
3105 void mptcp_set_state(struct sock *sk, int state)
3106 {
3107 	int oldstate = sk->sk_state;
3108 
3109 	switch (state) {
3110 	case TCP_ESTABLISHED:
3111 		if (oldstate != TCP_ESTABLISHED)
3112 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3113 		break;
3114 	case TCP_CLOSE_WAIT:
3115 		/* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
3116 		 * MPTCP "accepted" sockets will be created later on. So no
3117 		 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
3118 		 */
3119 		break;
3120 	default:
3121 		if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
3122 			MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3123 	}
3124 
3125 	inet_sk_state_store(sk, state);
3126 }
3127 
3128 static const unsigned char new_state[16] = {
3129 	/* current state:     new state:      action:	*/
3130 	[0 /* (Invalid) */] = TCP_CLOSE,
3131 	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3132 	[TCP_SYN_SENT]      = TCP_CLOSE,
3133 	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3134 	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
3135 	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
3136 	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
3137 	[TCP_CLOSE]         = TCP_CLOSE,
3138 	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
3139 	[TCP_LAST_ACK]      = TCP_LAST_ACK,
3140 	[TCP_LISTEN]        = TCP_CLOSE,
3141 	[TCP_CLOSING]       = TCP_CLOSING,
3142 	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
3143 };
3144 
3145 static int mptcp_close_state(struct sock *sk)
3146 {
3147 	int next = (int)new_state[sk->sk_state];
3148 	int ns = next & TCP_STATE_MASK;
3149 
3150 	mptcp_set_state(sk, ns);
3151 
3152 	return next & TCP_ACTION_FIN;
3153 }
3154 
3155 static void mptcp_check_send_data_fin(struct sock *sk)
3156 {
3157 	struct mptcp_subflow_context *subflow;
3158 	struct mptcp_sock *msk = mptcp_sk(sk);
3159 
3160 	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
3161 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
3162 		 msk->snd_nxt, msk->write_seq);
3163 
3164 	/* we still need to enqueue subflows or not really shutting down,
3165 	 * skip this
3166 	 */
3167 	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
3168 	    mptcp_send_head(sk))
3169 		return;
3170 
3171 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3172 
3173 	mptcp_for_each_subflow(msk, subflow) {
3174 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
3175 
3176 		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
3177 	}
3178 }
3179 
3180 static void __mptcp_wr_shutdown(struct sock *sk)
3181 {
3182 	struct mptcp_sock *msk = mptcp_sk(sk);
3183 
3184 	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
3185 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
3186 		 !!mptcp_send_head(sk));
3187 
3188 	/* will be ignored by fallback sockets */
3189 	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
3190 	WRITE_ONCE(msk->snd_data_fin_enable, 1);
3191 
3192 	mptcp_check_send_data_fin(sk);
3193 }
3194 
3195 static void __mptcp_destroy_sock(struct sock *sk)
3196 {
3197 	struct mptcp_sock *msk = mptcp_sk(sk);
3198 
3199 	pr_debug("msk=%p\n", msk);
3200 
3201 	might_sleep();
3202 
3203 	mptcp_stop_rtx_timer(sk);
3204 	sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
3205 	msk->pm.status = 0;
3206 	mptcp_release_sched(msk);
3207 
3208 	sk->sk_prot->destroy(sk);
3209 
3210 	sk_stream_kill_queues(sk);
3211 	xfrm_sk_free_policy(sk);
3212 
3213 	sock_put(sk);
3214 }
3215 
3216 void __mptcp_unaccepted_force_close(struct sock *sk)
3217 {
3218 	sock_set_flag(sk, SOCK_DEAD);
3219 	mptcp_do_fastclose(sk);
3220 	__mptcp_destroy_sock(sk);
3221 }
3222 
3223 static __poll_t mptcp_check_readable(struct sock *sk)
3224 {
3225 	return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
3226 }
3227 
3228 static void mptcp_check_listen_stop(struct sock *sk)
3229 {
3230 	struct sock *ssk;
3231 
3232 	if (inet_sk_state_load(sk) != TCP_LISTEN)
3233 		return;
3234 
3235 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3236 	ssk = mptcp_sk(sk)->first;
3237 	if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
3238 		return;
3239 
3240 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
3241 	tcp_set_state(ssk, TCP_CLOSE);
3242 	mptcp_subflow_queue_clean(sk, ssk);
3243 	inet_csk_listen_stop(ssk);
3244 	mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
3245 	release_sock(ssk);
3246 }
3247 
3248 bool __mptcp_close(struct sock *sk, long timeout)
3249 {
3250 	struct mptcp_subflow_context *subflow;
3251 	struct mptcp_sock *msk = mptcp_sk(sk);
3252 	bool do_cancel_work = false;
3253 	int subflows_alive = 0;
3254 
3255 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
3256 
3257 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
3258 		mptcp_check_listen_stop(sk);
3259 		mptcp_set_state(sk, TCP_CLOSE);
3260 		goto cleanup;
3261 	}
3262 
3263 	if (mptcp_data_avail(msk) || timeout < 0) {
3264 		/* If the msk has read data, or the caller explicitly ask it,
3265 		 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
3266 		 */
3267 		mptcp_do_fastclose(sk);
3268 		timeout = 0;
3269 	} else if (mptcp_close_state(sk)) {
3270 		__mptcp_wr_shutdown(sk);
3271 	}
3272 
3273 	sk_stream_wait_close(sk, timeout);
3274 
3275 cleanup:
3276 	/* orphan all the subflows */
3277 	mptcp_for_each_subflow(msk, subflow) {
3278 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3279 		bool slow = lock_sock_fast_nested(ssk);
3280 
3281 		subflows_alive += ssk->sk_state != TCP_CLOSE;
3282 
3283 		/* since the close timeout takes precedence on the fail one,
3284 		 * cancel the latter
3285 		 */
3286 		if (ssk == msk->first)
3287 			subflow->fail_tout = 0;
3288 
3289 		/* detach from the parent socket, but allow data_ready to
3290 		 * push incoming data into the mptcp stack, to properly ack it
3291 		 */
3292 		ssk->sk_socket = NULL;
3293 		ssk->sk_wq = NULL;
3294 		unlock_sock_fast(ssk, slow);
3295 	}
3296 	sock_orphan(sk);
3297 
3298 	/* all the subflows are closed, only timeout can change the msk
3299 	 * state, let's not keep resources busy for no reasons
3300 	 */
3301 	if (subflows_alive == 0)
3302 		mptcp_set_state(sk, TCP_CLOSE);
3303 
3304 	sock_hold(sk);
3305 	pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
3306 	mptcp_pm_connection_closed(msk);
3307 
3308 	if (sk->sk_state == TCP_CLOSE) {
3309 		__mptcp_destroy_sock(sk);
3310 		do_cancel_work = true;
3311 	} else {
3312 		mptcp_start_tout_timer(sk);
3313 	}
3314 
3315 	return do_cancel_work;
3316 }
3317 
3318 static void mptcp_close(struct sock *sk, long timeout)
3319 {
3320 	bool do_cancel_work;
3321 
3322 	lock_sock(sk);
3323 
3324 	do_cancel_work = __mptcp_close(sk, timeout);
3325 	release_sock(sk);
3326 	if (do_cancel_work)
3327 		mptcp_cancel_work(sk);
3328 
3329 	sock_put(sk);
3330 }
3331 
3332 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
3333 {
3334 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3335 	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
3336 	struct ipv6_pinfo *msk6 = inet6_sk(msk);
3337 
3338 	msk->sk_v6_daddr = ssk->sk_v6_daddr;
3339 	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
3340 
3341 	if (msk6 && ssk6) {
3342 		msk6->saddr = ssk6->saddr;
3343 		msk6->flow_label = ssk6->flow_label;
3344 	}
3345 #endif
3346 
3347 	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
3348 	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
3349 	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
3350 	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
3351 	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
3352 	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
3353 }
3354 
3355 static void mptcp_destroy_common(struct mptcp_sock *msk)
3356 {
3357 	struct mptcp_subflow_context *subflow, *tmp;
3358 	struct sock *sk = (struct sock *)msk;
3359 
3360 	__mptcp_clear_xmit(sk);
3361 	mptcp_backlog_purge(sk);
3362 
3363 	/* join list will be eventually flushed (with rst) at sock lock release time */
3364 	mptcp_for_each_subflow_safe(msk, subflow, tmp)
3365 		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
3366 
3367 	__skb_queue_purge(&sk->sk_receive_queue);
3368 	skb_rbtree_purge(&msk->out_of_order_queue);
3369 
3370 	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
3371 	 * inet_sock_destruct() will dispose it
3372 	 */
3373 	mptcp_token_destroy(msk);
3374 	mptcp_pm_destroy(msk);
3375 }
3376 
3377 static int mptcp_disconnect(struct sock *sk, int flags)
3378 {
3379 	struct mptcp_sock *msk = mptcp_sk(sk);
3380 
3381 	/* We are on the fastopen error path. We can't call straight into the
3382 	 * subflows cleanup code due to lock nesting (we are already under
3383 	 * msk->firstsocket lock).
3384 	 */
3385 	if (msk->fastopening)
3386 		return -EBUSY;
3387 
3388 	mptcp_check_listen_stop(sk);
3389 	mptcp_set_state(sk, TCP_CLOSE);
3390 
3391 	mptcp_stop_rtx_timer(sk);
3392 	mptcp_stop_tout_timer(sk);
3393 
3394 	mptcp_pm_connection_closed(msk);
3395 
3396 	/* msk->subflow is still intact, the following will not free the first
3397 	 * subflow
3398 	 */
3399 	mptcp_do_fastclose(sk);
3400 	mptcp_destroy_common(msk);
3401 
3402 	/* The first subflow is already in TCP_CLOSE status, the following
3403 	 * can't overlap with a fallback anymore
3404 	 */
3405 	spin_lock_bh(&msk->fallback_lock);
3406 	msk->allow_subflows = true;
3407 	msk->allow_infinite_fallback = true;
3408 	WRITE_ONCE(msk->flags, 0);
3409 	spin_unlock_bh(&msk->fallback_lock);
3410 
3411 	msk->cb_flags = 0;
3412 	msk->recovery = false;
3413 	WRITE_ONCE(msk->can_ack, false);
3414 	WRITE_ONCE(msk->fully_established, false);
3415 	WRITE_ONCE(msk->rcv_data_fin, false);
3416 	WRITE_ONCE(msk->snd_data_fin_enable, false);
3417 	WRITE_ONCE(msk->rcv_fastclose, false);
3418 	WRITE_ONCE(msk->use_64bit_ack, false);
3419 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3420 	mptcp_pm_data_reset(msk);
3421 	mptcp_ca_reset(sk);
3422 	msk->bytes_consumed = 0;
3423 	msk->bytes_acked = 0;
3424 	msk->bytes_received = 0;
3425 	msk->bytes_sent = 0;
3426 	msk->bytes_retrans = 0;
3427 	msk->rcvspace_init = 0;
3428 	msk->fastclosing = 0;
3429 
3430 	/* for fallback's sake */
3431 	WRITE_ONCE(msk->ack_seq, 0);
3432 
3433 	WRITE_ONCE(sk->sk_shutdown, 0);
3434 	sk_error_report(sk);
3435 	return 0;
3436 }
3437 
3438 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3439 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
3440 {
3441 	struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk);
3442 
3443 	return &msk6->np;
3444 }
3445 
3446 static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
3447 {
3448 	const struct ipv6_pinfo *np = inet6_sk(sk);
3449 	struct ipv6_txoptions *opt;
3450 	struct ipv6_pinfo *newnp;
3451 
3452 	newnp = inet6_sk(newsk);
3453 
3454 	rcu_read_lock();
3455 	opt = rcu_dereference(np->opt);
3456 	if (opt) {
3457 		opt = ipv6_dup_options(newsk, opt);
3458 		if (!opt)
3459 			net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
3460 	}
3461 	RCU_INIT_POINTER(newnp->opt, opt);
3462 	rcu_read_unlock();
3463 }
3464 #endif
3465 
3466 static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
3467 {
3468 	struct ip_options_rcu *inet_opt, *newopt = NULL;
3469 	const struct inet_sock *inet = inet_sk(sk);
3470 	struct inet_sock *newinet;
3471 
3472 	newinet = inet_sk(newsk);
3473 
3474 	rcu_read_lock();
3475 	inet_opt = rcu_dereference(inet->inet_opt);
3476 	if (inet_opt) {
3477 		newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) +
3478 				      inet_opt->opt.optlen, GFP_ATOMIC);
3479 		if (!newopt)
3480 			net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
3481 	}
3482 	RCU_INIT_POINTER(newinet->inet_opt, newopt);
3483 	rcu_read_unlock();
3484 }
3485 
3486 struct sock *mptcp_sk_clone_init(const struct sock *sk,
3487 				 const struct mptcp_options_received *mp_opt,
3488 				 struct sock *ssk,
3489 				 struct request_sock *req)
3490 {
3491 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
3492 	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
3493 	struct mptcp_subflow_context *subflow;
3494 	struct mptcp_sock *msk;
3495 
3496 	if (!nsk)
3497 		return NULL;
3498 
3499 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3500 	if (nsk->sk_family == AF_INET6)
3501 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
3502 #endif
3503 
3504 	__mptcp_init_sock(nsk);
3505 
3506 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3507 	if (nsk->sk_family == AF_INET6)
3508 		mptcp_copy_ip6_options(nsk, sk);
3509 	else
3510 #endif
3511 		mptcp_copy_ip_options(nsk, sk);
3512 
3513 	msk = mptcp_sk(nsk);
3514 	WRITE_ONCE(msk->local_key, subflow_req->local_key);
3515 	WRITE_ONCE(msk->token, subflow_req->token);
3516 	msk->in_accept_queue = 1;
3517 	WRITE_ONCE(msk->fully_established, false);
3518 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
3519 		WRITE_ONCE(msk->csum_enabled, true);
3520 
3521 	WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
3522 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3523 	WRITE_ONCE(msk->snd_una, msk->write_seq);
3524 	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
3525 	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
3526 	mptcp_init_sched(msk, mptcp_sk(sk)->sched);
3527 
3528 	/* passive msk is created after the first/MPC subflow */
3529 	msk->subflow_id = 2;
3530 
3531 	sock_reset_flag(nsk, SOCK_RCU_FREE);
3532 	security_inet_csk_clone(nsk, req);
3533 
3534 	/* this can't race with mptcp_close(), as the msk is
3535 	 * not yet exposted to user-space
3536 	 */
3537 	mptcp_set_state(nsk, TCP_ESTABLISHED);
3538 
3539 	/* The msk maintain a ref to each subflow in the connections list */
3540 	WRITE_ONCE(msk->first, ssk);
3541 	subflow = mptcp_subflow_ctx(ssk);
3542 	list_add(&subflow->node, &msk->conn_list);
3543 	sock_hold(ssk);
3544 
3545 	/* new mpc subflow takes ownership of the newly
3546 	 * created mptcp socket
3547 	 */
3548 	mptcp_token_accept(subflow_req, msk);
3549 
3550 	/* set msk addresses early to ensure mptcp_pm_get_local_id()
3551 	 * uses the correct data
3552 	 */
3553 	mptcp_copy_inaddrs(nsk, ssk);
3554 	__mptcp_propagate_sndbuf(nsk, ssk);
3555 
3556 	mptcp_rcv_space_init(msk, ssk);
3557 
3558 	if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
3559 		__mptcp_subflow_fully_established(msk, subflow, mp_opt);
3560 	bh_unlock_sock(nsk);
3561 
3562 	/* note: the newly allocated socket refcount is 2 now */
3563 	return nsk;
3564 }
3565 
3566 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
3567 {
3568 	const struct tcp_sock *tp = tcp_sk(ssk);
3569 
3570 	msk->rcvspace_init = 1;
3571 	msk->rcvq_space.copied = 0;
3572 	msk->rcvq_space.rtt_us = 0;
3573 
3574 	msk->rcvq_space.time = tp->tcp_mstamp;
3575 
3576 	/* initial rcv_space offering made to peer */
3577 	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
3578 				      TCP_INIT_CWND * tp->advmss);
3579 	if (msk->rcvq_space.space == 0)
3580 		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
3581 }
3582 
3583 static void mptcp_destroy(struct sock *sk)
3584 {
3585 	struct mptcp_sock *msk = mptcp_sk(sk);
3586 
3587 	/* allow the following to close even the initial subflow */
3588 	msk->free_first = 1;
3589 	mptcp_destroy_common(msk);
3590 	sk_sockets_allocated_dec(sk);
3591 }
3592 
3593 void __mptcp_data_acked(struct sock *sk)
3594 {
3595 	if (!sock_owned_by_user(sk))
3596 		__mptcp_clean_una(sk);
3597 	else
3598 		__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
3599 }
3600 
3601 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3602 {
3603 	if (!sock_owned_by_user(sk))
3604 		__mptcp_subflow_push_pending(sk, ssk, false);
3605 	else
3606 		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3607 }
3608 
3609 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3610 				      BIT(MPTCP_RETRANSMIT) | \
3611 				      BIT(MPTCP_FLUSH_JOIN_LIST))
3612 
3613 /* processes deferred events and flush wmem */
3614 static void mptcp_release_cb(struct sock *sk)
3615 	__must_hold(&sk->sk_lock.slock)
3616 {
3617 	struct mptcp_sock *msk = mptcp_sk(sk);
3618 
3619 	for (;;) {
3620 		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
3621 		struct list_head join_list, skbs;
3622 		bool spool_bl;
3623 		u32 moved;
3624 
3625 		spool_bl = mptcp_can_spool_backlog(sk, &skbs);
3626 		if (!flags && !spool_bl)
3627 			break;
3628 
3629 		INIT_LIST_HEAD(&join_list);
3630 		list_splice_init(&msk->join_list, &join_list);
3631 
3632 		/* the following actions acquire the subflow socket lock
3633 		 *
3634 		 * 1) can't be invoked in atomic scope
3635 		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3636 		 *    datapath acquires the msk socket spinlock while helding
3637 		 *    the subflow socket lock
3638 		 */
3639 		msk->cb_flags &= ~flags;
3640 		spin_unlock_bh(&sk->sk_lock.slock);
3641 
3642 		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
3643 			__mptcp_flush_join_list(sk, &join_list);
3644 		if (flags & BIT(MPTCP_PUSH_PENDING))
3645 			__mptcp_push_pending(sk, 0);
3646 		if (flags & BIT(MPTCP_RETRANSMIT))
3647 			__mptcp_retrans(sk);
3648 		if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
3649 			/* notify ack seq update */
3650 			mptcp_cleanup_rbuf(msk, 0);
3651 			sk->sk_data_ready(sk);
3652 		}
3653 
3654 		cond_resched();
3655 		spin_lock_bh(&sk->sk_lock.slock);
3656 		if (spool_bl)
3657 			mptcp_backlog_spooled(sk, moved, &skbs);
3658 	}
3659 
3660 	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3661 		__mptcp_clean_una_wakeup(sk);
3662 	if (unlikely(msk->cb_flags)) {
3663 		/* be sure to sync the msk state before taking actions
3664 		 * depending on sk_state (MPTCP_ERROR_REPORT)
3665 		 * On sk release avoid actions depending on the first subflow
3666 		 */
3667 		if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
3668 			__mptcp_sync_state(sk, msk->pending_state);
3669 		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3670 			__mptcp_error_report(sk);
3671 		if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
3672 			__mptcp_sync_sndbuf(sk);
3673 	}
3674 }
3675 
3676 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3677  * TCP can't schedule delack timer before the subflow is fully established.
3678  * MPTCP uses the delack timer to do 3rd ack retransmissions
3679  */
3680 static void schedule_3rdack_retransmission(struct sock *ssk)
3681 {
3682 	struct inet_connection_sock *icsk = inet_csk(ssk);
3683 	struct tcp_sock *tp = tcp_sk(ssk);
3684 	unsigned long timeout;
3685 
3686 	if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
3687 		return;
3688 
3689 	/* reschedule with a timeout above RTT, as we must look only for drop */
3690 	if (tp->srtt_us)
3691 		timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
3692 	else
3693 		timeout = TCP_TIMEOUT_INIT;
3694 	timeout += jiffies;
3695 
3696 	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
3697 	smp_store_release(&icsk->icsk_ack.pending,
3698 			  icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
3699 	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3700 }
3701 
3702 void mptcp_subflow_process_delegated(struct sock *ssk, long status)
3703 {
3704 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3705 	struct sock *sk = subflow->conn;
3706 
3707 	if (status & BIT(MPTCP_DELEGATE_SEND)) {
3708 		mptcp_data_lock(sk);
3709 		if (!sock_owned_by_user(sk))
3710 			__mptcp_subflow_push_pending(sk, ssk, true);
3711 		else
3712 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3713 		mptcp_data_unlock(sk);
3714 	}
3715 	if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
3716 		mptcp_data_lock(sk);
3717 		if (!sock_owned_by_user(sk))
3718 			__mptcp_sync_sndbuf(sk);
3719 		else
3720 			__set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
3721 		mptcp_data_unlock(sk);
3722 	}
3723 	if (status & BIT(MPTCP_DELEGATE_ACK))
3724 		schedule_3rdack_retransmission(ssk);
3725 }
3726 
3727 static int mptcp_hash(struct sock *sk)
3728 {
3729 	/* should never be called,
3730 	 * we hash the TCP subflows not the MPTCP socket
3731 	 */
3732 	WARN_ON_ONCE(1);
3733 	return 0;
3734 }
3735 
3736 static void mptcp_unhash(struct sock *sk)
3737 {
3738 	/* called from sk_common_release(), but nothing to do here */
3739 }
3740 
3741 static int mptcp_get_port(struct sock *sk, unsigned short snum)
3742 {
3743 	struct mptcp_sock *msk = mptcp_sk(sk);
3744 
3745 	pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
3746 	if (WARN_ON_ONCE(!msk->first))
3747 		return -EINVAL;
3748 
3749 	return inet_csk_get_port(msk->first, snum);
3750 }
3751 
3752 void mptcp_finish_connect(struct sock *ssk)
3753 {
3754 	struct mptcp_subflow_context *subflow;
3755 	struct mptcp_sock *msk;
3756 	struct sock *sk;
3757 
3758 	subflow = mptcp_subflow_ctx(ssk);
3759 	sk = subflow->conn;
3760 	msk = mptcp_sk(sk);
3761 
3762 	pr_debug("msk=%p, token=%u\n", sk, subflow->token);
3763 
3764 	subflow->map_seq = subflow->iasn;
3765 	subflow->map_subflow_seq = 1;
3766 
3767 	/* the socket is not connected yet, no msk/subflow ops can access/race
3768 	 * accessing the field below
3769 	 */
3770 	WRITE_ONCE(msk->local_key, subflow->local_key);
3771 
3772 	mptcp_pm_new_connection(msk, ssk, 0);
3773 }
3774 
3775 void mptcp_sock_graft(struct sock *sk, struct socket *parent)
3776 {
3777 	write_lock_bh(&sk->sk_callback_lock);
3778 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
3779 	sk_set_socket(sk, parent);
3780 	write_unlock_bh(&sk->sk_callback_lock);
3781 }
3782 
3783 /* Can be called without holding the msk socket lock; use the callback lock
3784  * to avoid {READ_,WRITE_}ONCE annotations on sk_socket.
3785  */
3786 static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
3787 {
3788 	struct socket *sock;
3789 
3790 	write_lock_bh(&sk->sk_callback_lock);
3791 	sock = sk->sk_socket;
3792 	write_unlock_bh(&sk->sk_callback_lock);
3793 	if (sock) {
3794 		mptcp_sock_graft(ssk, sock);
3795 		__mptcp_inherit_cgrp_data(sk, ssk);
3796 		__mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC);
3797 	}
3798 }
3799 
3800 bool mptcp_finish_join(struct sock *ssk)
3801 {
3802 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3803 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3804 	struct sock *parent = (void *)msk;
3805 	bool ret = true;
3806 
3807 	pr_debug("msk=%p, subflow=%p\n", msk, subflow);
3808 
3809 	/* mptcp socket already closing? */
3810 	if (!mptcp_is_fully_established(parent)) {
3811 		subflow->reset_reason = MPTCP_RST_EMPTCP;
3812 		return false;
3813 	}
3814 
3815 	/* Active subflow, already present inside the conn_list; is grafted
3816 	 * either by __mptcp_subflow_connect() or accept.
3817 	 */
3818 	if (!list_empty(&subflow->node)) {
3819 		spin_lock_bh(&msk->fallback_lock);
3820 		if (!msk->allow_subflows) {
3821 			spin_unlock_bh(&msk->fallback_lock);
3822 			return false;
3823 		}
3824 		mptcp_subflow_joined(msk, ssk);
3825 		spin_unlock_bh(&msk->fallback_lock);
3826 		mptcp_propagate_sndbuf(parent, ssk);
3827 		return true;
3828 	}
3829 
3830 	if (!mptcp_pm_allow_new_subflow(msk)) {
3831 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED);
3832 		goto err_prohibited;
3833 	}
3834 
3835 	/* If we can't acquire msk socket lock here, let the release callback
3836 	 * handle it
3837 	 */
3838 	mptcp_data_lock(parent);
3839 	if (!sock_owned_by_user(parent)) {
3840 		ret = __mptcp_finish_join(msk, ssk);
3841 		if (ret) {
3842 			sock_hold(ssk);
3843 			list_add_tail(&subflow->node, &msk->conn_list);
3844 			mptcp_sock_check_graft(parent, ssk);
3845 		}
3846 	} else {
3847 		sock_hold(ssk);
3848 		list_add_tail(&subflow->node, &msk->join_list);
3849 		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
3850 
3851 		/* In case of later failures, __mptcp_flush_join_list() will
3852 		 * properly orphan the ssk via mptcp_close_ssk().
3853 		 */
3854 		mptcp_sock_check_graft(parent, ssk);
3855 	}
3856 	mptcp_data_unlock(parent);
3857 
3858 	if (!ret) {
3859 err_prohibited:
3860 		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
3861 		return false;
3862 	}
3863 
3864 	return true;
3865 }
3866 
3867 static void mptcp_shutdown(struct sock *sk, int how)
3868 {
3869 	pr_debug("sk=%p, how=%d\n", sk, how);
3870 
3871 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
3872 		__mptcp_wr_shutdown(sk);
3873 }
3874 
3875 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
3876 {
3877 	const struct sock *sk = (void *)msk;
3878 	u64 delta;
3879 
3880 	if (sk->sk_state == TCP_LISTEN)
3881 		return -EINVAL;
3882 
3883 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
3884 		return 0;
3885 
3886 	delta = msk->write_seq - v;
3887 	if (__mptcp_check_fallback(msk) && msk->first) {
3888 		struct tcp_sock *tp = tcp_sk(msk->first);
3889 
3890 		/* the first subflow is disconnected after close - see
3891 		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
3892 		 * so ignore that status, too.
3893 		 */
3894 		if (!((1 << msk->first->sk_state) &
3895 		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
3896 			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
3897 	}
3898 	if (delta > INT_MAX)
3899 		delta = INT_MAX;
3900 
3901 	return (int)delta;
3902 }
3903 
3904 static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
3905 {
3906 	struct mptcp_sock *msk = mptcp_sk(sk);
3907 	bool slow;
3908 
3909 	switch (cmd) {
3910 	case SIOCINQ:
3911 		if (sk->sk_state == TCP_LISTEN)
3912 			return -EINVAL;
3913 
3914 		lock_sock(sk);
3915 		if (mptcp_move_skbs(sk))
3916 			mptcp_cleanup_rbuf(msk, 0);
3917 		*karg = mptcp_inq_hint(sk);
3918 		release_sock(sk);
3919 		break;
3920 	case SIOCOUTQ:
3921 		slow = lock_sock_fast(sk);
3922 		*karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
3923 		unlock_sock_fast(sk, slow);
3924 		break;
3925 	case SIOCOUTQNSD:
3926 		slow = lock_sock_fast(sk);
3927 		*karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
3928 		unlock_sock_fast(sk, slow);
3929 		break;
3930 	default:
3931 		return -ENOIOCTLCMD;
3932 	}
3933 
3934 	return 0;
3935 }
3936 
3937 static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
3938 			 int addr_len)
3939 {
3940 	struct mptcp_subflow_context *subflow;
3941 	struct mptcp_sock *msk = mptcp_sk(sk);
3942 	int err = -EINVAL;
3943 	struct sock *ssk;
3944 
3945 	ssk = __mptcp_nmpc_sk(msk);
3946 	if (IS_ERR(ssk))
3947 		return PTR_ERR(ssk);
3948 
3949 	mptcp_set_state(sk, TCP_SYN_SENT);
3950 	subflow = mptcp_subflow_ctx(ssk);
3951 #ifdef CONFIG_TCP_MD5SIG
3952 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3953 	 * TCP option space.
3954 	 */
3955 	if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
3956 		mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK);
3957 #endif
3958 	if (subflow->request_mptcp) {
3959 		if (mptcp_active_should_disable(sk))
3960 			mptcp_early_fallback(msk, subflow,
3961 					     MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
3962 		else if (mptcp_token_new_connect(ssk) < 0)
3963 			mptcp_early_fallback(msk, subflow,
3964 					     MPTCP_MIB_TOKENFALLBACKINIT);
3965 	}
3966 
3967 	WRITE_ONCE(msk->write_seq, subflow->idsn);
3968 	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
3969 	WRITE_ONCE(msk->snd_una, subflow->idsn);
3970 	if (likely(!__mptcp_check_fallback(msk)))
3971 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
3972 
3973 	/* if reaching here via the fastopen/sendmsg path, the caller already
3974 	 * acquired the subflow socket lock, too.
3975 	 */
3976 	if (!msk->fastopening)
3977 		lock_sock(ssk);
3978 
3979 	/* the following mirrors closely a very small chunk of code from
3980 	 * __inet_stream_connect()
3981 	 */
3982 	if (ssk->sk_state != TCP_CLOSE)
3983 		goto out;
3984 
3985 	if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
3986 		err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
3987 		if (err)
3988 			goto out;
3989 	}
3990 
3991 	err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
3992 	if (err < 0)
3993 		goto out;
3994 
3995 	inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));
3996 
3997 out:
3998 	if (!msk->fastopening)
3999 		release_sock(ssk);
4000 
4001 	/* on successful connect, the msk state will be moved to established by
4002 	 * subflow_finish_connect()
4003 	 */
4004 	if (unlikely(err)) {
4005 		/* avoid leaving a dangling token in an unconnected socket */
4006 		mptcp_token_destroy(msk);
4007 		mptcp_set_state(sk, TCP_CLOSE);
4008 		return err;
4009 	}
4010 
4011 	mptcp_copy_inaddrs(sk, ssk);
4012 	return 0;
4013 }
4014 
4015 static struct proto mptcp_prot = {
4016 	.name		= "MPTCP",
4017 	.owner		= THIS_MODULE,
4018 	.init		= mptcp_init_sock,
4019 	.connect	= mptcp_connect,
4020 	.disconnect	= mptcp_disconnect,
4021 	.close		= mptcp_close,
4022 	.setsockopt	= mptcp_setsockopt,
4023 	.getsockopt	= mptcp_getsockopt,
4024 	.shutdown	= mptcp_shutdown,
4025 	.destroy	= mptcp_destroy,
4026 	.sendmsg	= mptcp_sendmsg,
4027 	.ioctl		= mptcp_ioctl,
4028 	.recvmsg	= mptcp_recvmsg,
4029 	.release_cb	= mptcp_release_cb,
4030 	.hash		= mptcp_hash,
4031 	.unhash		= mptcp_unhash,
4032 	.get_port	= mptcp_get_port,
4033 	.stream_memory_free	= mptcp_stream_memory_free,
4034 	.sockets_allocated	= &mptcp_sockets_allocated,
4035 
4036 	.memory_allocated	= &net_aligned_data.tcp_memory_allocated,
4037 	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
4038 
4039 	.memory_pressure	= &tcp_memory_pressure,
4040 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
4041 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
4042 	.sysctl_mem	= sysctl_tcp_mem,
4043 	.obj_size	= sizeof(struct mptcp_sock),
4044 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
4045 	.no_autobind	= true,
4046 };
4047 
4048 static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
4049 {
4050 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
4051 	struct sock *ssk, *sk = sock->sk;
4052 	int err = -EINVAL;
4053 
4054 	lock_sock(sk);
4055 	ssk = __mptcp_nmpc_sk(msk);
4056 	if (IS_ERR(ssk)) {
4057 		err = PTR_ERR(ssk);
4058 		goto unlock;
4059 	}
4060 
4061 	if (sk->sk_family == AF_INET)
4062 		err = inet_bind_sk(ssk, uaddr, addr_len);
4063 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4064 	else if (sk->sk_family == AF_INET6)
4065 		err = inet6_bind_sk(ssk, uaddr, addr_len);
4066 #endif
4067 	if (!err)
4068 		mptcp_copy_inaddrs(sk, ssk);
4069 
4070 unlock:
4071 	release_sock(sk);
4072 	return err;
4073 }
4074 
4075 static int mptcp_listen(struct socket *sock, int backlog)
4076 {
4077 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
4078 	struct sock *sk = sock->sk;
4079 	struct sock *ssk;
4080 	int err;
4081 
4082 	pr_debug("msk=%p\n", msk);
4083 
4084 	lock_sock(sk);
4085 
4086 	err = -EINVAL;
4087 	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
4088 		goto unlock;
4089 
4090 	ssk = __mptcp_nmpc_sk(msk);
4091 	if (IS_ERR(ssk)) {
4092 		err = PTR_ERR(ssk);
4093 		goto unlock;
4094 	}
4095 
4096 	mptcp_set_state(sk, TCP_LISTEN);
4097 	sock_set_flag(sk, SOCK_RCU_FREE);
4098 
4099 	lock_sock(ssk);
4100 	err = __inet_listen_sk(ssk, backlog);
4101 	release_sock(ssk);
4102 	mptcp_set_state(sk, inet_sk_state_load(ssk));
4103 
4104 	if (!err) {
4105 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
4106 		mptcp_copy_inaddrs(sk, ssk);
4107 		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
4108 	}
4109 
4110 unlock:
4111 	release_sock(sk);
4112 	return err;
4113 }
4114 
4115 static void mptcp_graft_subflows(struct sock *sk)
4116 {
4117 	struct mptcp_subflow_context *subflow;
4118 	struct mptcp_sock *msk = mptcp_sk(sk);
4119 
4120 	if (mem_cgroup_sockets_enabled) {
4121 		LIST_HEAD(join_list);
4122 
4123 		/* Subflows joining after __inet_accept() will get the
4124 		 * mem CG properly initialized at mptcp_finish_join() time,
4125 		 * but subflows pending in join_list need explicit
4126 		 * initialization before flushing `backlog_unaccounted`
4127 		 * or MPTCP can later unexpectedly observe unaccounted memory.
4128 		 */
4129 		mptcp_data_lock(sk);
4130 		list_splice_init(&msk->join_list, &join_list);
4131 		mptcp_data_unlock(sk);
4132 
4133 		__mptcp_flush_join_list(sk, &join_list);
4134 	}
4135 
4136 	mptcp_for_each_subflow(msk, subflow) {
4137 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4138 
4139 		lock_sock(ssk);
4140 
4141 		/* Set ssk->sk_socket of accept()ed flows to mptcp socket.
4142 		 * This is needed so NOSPACE flag can be set from tcp stack.
4143 		 */
4144 		if (!ssk->sk_socket)
4145 			mptcp_sock_graft(ssk, sk->sk_socket);
4146 
4147 		if (!mem_cgroup_sk_enabled(sk))
4148 			goto unlock;
4149 
4150 		__mptcp_inherit_cgrp_data(sk, ssk);
4151 		__mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);
4152 
4153 unlock:
4154 		release_sock(ssk);
4155 	}
4156 
4157 	if (mem_cgroup_sk_enabled(sk)) {
4158 		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
4159 		int amt;
4160 
4161 		/* Account the backlog memory; prior accept() is aware of
4162 		 * fwd and rmem only.
4163 		 */
4164 		mptcp_data_lock(sk);
4165 		amt = sk_mem_pages(sk->sk_forward_alloc +
4166 				   msk->backlog_unaccounted +
4167 				   atomic_read(&sk->sk_rmem_alloc)) -
4168 		      sk_mem_pages(sk->sk_forward_alloc +
4169 				   atomic_read(&sk->sk_rmem_alloc));
4170 		msk->backlog_unaccounted = 0;
4171 		mptcp_data_unlock(sk);
4172 
4173 		if (amt)
4174 			mem_cgroup_sk_charge(sk, amt, gfp);
4175 	}
4176 }
4177 
4178 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
4179 			       struct proto_accept_arg *arg)
4180 {
4181 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
4182 	struct sock *ssk, *newsk;
4183 
4184 	pr_debug("msk=%p\n", msk);
4185 
4186 	/* Buggy applications can call accept on socket states other then LISTEN
4187 	 * but no need to allocate the first subflow just to error out.
4188 	 */
4189 	ssk = READ_ONCE(msk->first);
4190 	if (!ssk)
4191 		return -EINVAL;
4192 
4193 	pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
4194 	newsk = inet_csk_accept(ssk, arg);
4195 	if (!newsk)
4196 		return arg->err;
4197 
4198 	pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
4199 	if (sk_is_mptcp(newsk)) {
4200 		struct mptcp_subflow_context *subflow;
4201 		struct sock *new_mptcp_sock;
4202 
4203 		subflow = mptcp_subflow_ctx(newsk);
4204 		new_mptcp_sock = subflow->conn;
4205 
4206 		/* is_mptcp should be false if subflow->conn is missing, see
4207 		 * subflow_syn_recv_sock()
4208 		 */
4209 		if (WARN_ON_ONCE(!new_mptcp_sock)) {
4210 			tcp_sk(newsk)->is_mptcp = 0;
4211 			goto tcpfallback;
4212 		}
4213 
4214 		newsk = new_mptcp_sock;
4215 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
4216 
4217 		newsk->sk_kern_sock = arg->kern;
4218 		lock_sock(newsk);
4219 		__inet_accept(sock, newsock, newsk);
4220 
4221 		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
4222 		msk = mptcp_sk(newsk);
4223 		msk->in_accept_queue = 0;
4224 
4225 		mptcp_graft_subflows(newsk);
4226 		mptcp_rps_record_subflows(msk);
4227 
4228 		/* Do late cleanup for the first subflow as necessary. Also
4229 		 * deal with bad peers not doing a complete shutdown.
4230 		 */
4231 		if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
4232 			if (unlikely(list_is_singular(&msk->conn_list)))
4233 				mptcp_set_state(newsk, TCP_CLOSE);
4234 			mptcp_close_ssk(newsk, msk->first,
4235 					mptcp_subflow_ctx(msk->first));
4236 		}
4237 	} else {
4238 tcpfallback:
4239 		newsk->sk_kern_sock = arg->kern;
4240 		lock_sock(newsk);
4241 		__inet_accept(sock, newsock, newsk);
4242 		/* we are being invoked after accepting a non-mp-capable
4243 		 * flow: sk is a tcp_sk, not an mptcp one.
4244 		 *
4245 		 * Hand the socket over to tcp so all further socket ops
4246 		 * bypass mptcp.
4247 		 */
4248 		WRITE_ONCE(newsock->sk->sk_socket->ops,
4249 			   mptcp_fallback_tcp_ops(newsock->sk));
4250 	}
4251 	release_sock(newsk);
4252 
4253 	return 0;
4254 }
4255 
4256 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
4257 {
4258 	struct sock *sk = (struct sock *)msk;
4259 
4260 	if (__mptcp_stream_is_writeable(sk, 1))
4261 		return EPOLLOUT | EPOLLWRNORM;
4262 
4263 	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
4264 	smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
4265 	if (__mptcp_stream_is_writeable(sk, 1))
4266 		return EPOLLOUT | EPOLLWRNORM;
4267 
4268 	return 0;
4269 }
4270 
4271 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
4272 			   struct poll_table_struct *wait)
4273 {
4274 	struct sock *sk = sock->sk;
4275 	struct mptcp_sock *msk;
4276 	__poll_t mask = 0;
4277 	u8 shutdown;
4278 	int state;
4279 
4280 	msk = mptcp_sk(sk);
4281 	sock_poll_wait(file, sock, wait);
4282 
4283 	state = inet_sk_state_load(sk);
4284 	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
4285 	if (state == TCP_LISTEN) {
4286 		struct sock *ssk = READ_ONCE(msk->first);
4287 
4288 		if (WARN_ON_ONCE(!ssk))
4289 			return 0;
4290 
4291 		return inet_csk_listen_poll(ssk);
4292 	}
4293 
4294 	shutdown = READ_ONCE(sk->sk_shutdown);
4295 	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
4296 		mask |= EPOLLHUP;
4297 	if (shutdown & RCV_SHUTDOWN)
4298 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
4299 
4300 	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
4301 		mask |= mptcp_check_readable(sk);
4302 		if (shutdown & SEND_SHUTDOWN)
4303 			mask |= EPOLLOUT | EPOLLWRNORM;
4304 		else
4305 			mask |= mptcp_check_writeable(msk);
4306 	} else if (state == TCP_SYN_SENT &&
4307 		   inet_test_bit(DEFER_CONNECT, sk)) {
4308 		/* cf tcp_poll() note about TFO */
4309 		mask |= EPOLLOUT | EPOLLWRNORM;
4310 	}
4311 
4312 	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
4313 	smp_rmb();
4314 	if (READ_ONCE(sk->sk_err))
4315 		mask |= EPOLLERR;
4316 
4317 	return mask;
4318 }
4319 
4320 static struct sk_buff *mptcp_recv_skb(struct sock *sk, u32 *off)
4321 {
4322 	struct mptcp_sock *msk = mptcp_sk(sk);
4323 	struct sk_buff *skb;
4324 	u32 offset;
4325 
4326 	if (!list_empty(&msk->backlog_list))
4327 		mptcp_move_skbs(sk);
4328 
4329 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
4330 		offset = MPTCP_SKB_CB(skb)->offset;
4331 		if (offset < skb->len) {
4332 			*off = offset;
4333 			return skb;
4334 		}
4335 		mptcp_eat_recv_skb(sk, skb);
4336 	}
4337 	return NULL;
4338 }
4339 
4340 /*
4341  * Note:
4342  *	- It is assumed that the socket was locked by the caller.
4343  */
4344 static int __mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
4345 			     sk_read_actor_t recv_actor, bool noack)
4346 {
4347 	struct mptcp_sock *msk = mptcp_sk(sk);
4348 	struct sk_buff *skb;
4349 	int copied = 0;
4350 	u32 offset;
4351 
4352 	msk_owned_by_me(msk);
4353 
4354 	if (sk->sk_state == TCP_LISTEN)
4355 		return -ENOTCONN;
4356 	while ((skb = mptcp_recv_skb(sk, &offset)) != NULL) {
4357 		u32 data_len = skb->len - offset;
4358 		int count;
4359 		u32 size;
4360 
4361 		size = min_t(size_t, data_len, INT_MAX);
4362 		count = recv_actor(desc, skb, offset, size);
4363 		if (count <= 0) {
4364 			if (!copied)
4365 				copied = count;
4366 			break;
4367 		}
4368 
4369 		copied += count;
4370 
4371 		msk->bytes_consumed += count;
4372 		if (count < data_len) {
4373 			MPTCP_SKB_CB(skb)->offset += count;
4374 			MPTCP_SKB_CB(skb)->map_seq += count;
4375 			break;
4376 		}
4377 
4378 		mptcp_eat_recv_skb(sk, skb);
4379 	}
4380 
4381 	if (noack)
4382 		goto out;
4383 
4384 	mptcp_rcv_space_adjust(msk, copied);
4385 
4386 	if (copied > 0) {
4387 		mptcp_recv_skb(sk, &offset);
4388 		mptcp_cleanup_rbuf(msk, copied);
4389 	}
4390 out:
4391 	return copied;
4392 }
4393 
4394 static int mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
4395 			   sk_read_actor_t recv_actor)
4396 {
4397 	return __mptcp_read_sock(sk, desc, recv_actor, false);
4398 }
4399 
4400 static int __mptcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
4401 {
4402 	/* Store TCP splice context information in read_descriptor_t. */
4403 	read_descriptor_t rd_desc = {
4404 		.arg.data = tss,
4405 		.count	  = tss->len,
4406 	};
4407 
4408 	return mptcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
4409 }
4410 
4411 /**
4412  *  mptcp_splice_read - splice data from MPTCP socket to a pipe
4413  * @sock:	socket to splice from
4414  * @ppos:	position (not valid)
4415  * @pipe:	pipe to splice to
4416  * @len:	number of bytes to splice
4417  * @flags:	splice modifier flags
4418  *
4419  * Description:
4420  *    Will read pages from given socket and fill them into a pipe.
4421  *
4422  * Return:
4423  *    Amount of bytes that have been spliced.
4424  *
4425  **/
4426 static ssize_t mptcp_splice_read(struct socket *sock, loff_t *ppos,
4427 				 struct pipe_inode_info *pipe, size_t len,
4428 				 unsigned int flags)
4429 {
4430 	struct tcp_splice_state tss = {
4431 		.pipe	= pipe,
4432 		.len	= len,
4433 		.flags	= flags,
4434 	};
4435 	struct sock *sk = sock->sk;
4436 	ssize_t spliced = 0;
4437 	int ret = 0;
4438 	long timeo;
4439 
4440 	/*
4441 	 * We can't seek on a socket input
4442 	 */
4443 	if (unlikely(*ppos))
4444 		return -ESPIPE;
4445 
4446 	lock_sock(sk);
4447 
4448 	mptcp_rps_record_subflows(mptcp_sk(sk));
4449 
4450 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
4451 	while (tss.len) {
4452 		ret = __mptcp_splice_read(sk, &tss);
4453 		if (ret < 0) {
4454 			break;
4455 		} else if (!ret) {
4456 			if (spliced)
4457 				break;
4458 			if (sock_flag(sk, SOCK_DONE))
4459 				break;
4460 			if (sk->sk_err) {
4461 				ret = sock_error(sk);
4462 				break;
4463 			}
4464 			if (sk->sk_shutdown & RCV_SHUTDOWN)
4465 				break;
4466 			if (sk->sk_state == TCP_CLOSE) {
4467 				/*
4468 				 * This occurs when user tries to read
4469 				 * from never connected socket.
4470 				 */
4471 				ret = -ENOTCONN;
4472 				break;
4473 			}
4474 			if (!timeo) {
4475 				ret = -EAGAIN;
4476 				break;
4477 			}
4478 			/* if __mptcp_splice_read() got nothing while we have
4479 			 * an skb in receive queue, we do not want to loop.
4480 			 * This might happen with URG data.
4481 			 */
4482 			if (!skb_queue_empty(&sk->sk_receive_queue))
4483 				break;
4484 			ret = sk_wait_data(sk, &timeo, NULL);
4485 			if (ret < 0)
4486 				break;
4487 			if (signal_pending(current)) {
4488 				ret = sock_intr_errno(timeo);
4489 				break;
4490 			}
4491 			continue;
4492 		}
4493 		tss.len -= ret;
4494 		spliced += ret;
4495 
4496 		if (!tss.len || !timeo)
4497 			break;
4498 		release_sock(sk);
4499 		lock_sock(sk);
4500 
4501 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
4502 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
4503 		    signal_pending(current))
4504 			break;
4505 	}
4506 
4507 	release_sock(sk);
4508 
4509 	if (spliced)
4510 		return spliced;
4511 
4512 	return ret;
4513 }
4514 
4515 static const struct proto_ops mptcp_stream_ops = {
4516 	.family		   = PF_INET,
4517 	.owner		   = THIS_MODULE,
4518 	.release	   = inet_release,
4519 	.bind		   = mptcp_bind,
4520 	.connect	   = inet_stream_connect,
4521 	.socketpair	   = sock_no_socketpair,
4522 	.accept		   = mptcp_stream_accept,
4523 	.getname	   = inet_getname,
4524 	.poll		   = mptcp_poll,
4525 	.ioctl		   = inet_ioctl,
4526 	.gettstamp	   = sock_gettstamp,
4527 	.listen		   = mptcp_listen,
4528 	.shutdown	   = inet_shutdown,
4529 	.setsockopt	   = sock_common_setsockopt,
4530 	.getsockopt	   = sock_common_getsockopt,
4531 	.sendmsg	   = inet_sendmsg,
4532 	.recvmsg	   = inet_recvmsg,
4533 	.mmap		   = sock_no_mmap,
4534 	.set_rcvlowat	   = mptcp_set_rcvlowat,
4535 	.read_sock	   = mptcp_read_sock,
4536 	.splice_read	   = mptcp_splice_read,
4537 };
4538 
4539 static struct inet_protosw mptcp_protosw = {
4540 	.type		= SOCK_STREAM,
4541 	.protocol	= IPPROTO_MPTCP,
4542 	.prot		= &mptcp_prot,
4543 	.ops		= &mptcp_stream_ops,
4544 	.flags		= INET_PROTOSW_ICSK,
4545 };
4546 
4547 static int mptcp_napi_poll(struct napi_struct *napi, int budget)
4548 {
4549 	struct mptcp_delegated_action *delegated;
4550 	struct mptcp_subflow_context *subflow;
4551 	int work_done = 0;
4552 
4553 	delegated = container_of(napi, struct mptcp_delegated_action, napi);
4554 	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
4555 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4556 
4557 		bh_lock_sock_nested(ssk);
4558 		if (!sock_owned_by_user(ssk)) {
4559 			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
4560 		} else {
4561 			/* tcp_release_cb_override already processed
4562 			 * the action or will do at next release_sock().
4563 			 * In both case must dequeue the subflow here - on the same
4564 			 * CPU that scheduled it.
4565 			 */
4566 			smp_wmb();
4567 			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
4568 		}
4569 		bh_unlock_sock(ssk);
4570 		sock_put(ssk);
4571 
4572 		if (++work_done == budget)
4573 			return budget;
4574 	}
4575 
4576 	/* always provide a 0 'work_done' argument, so that napi_complete_done
4577 	 * will not try accessing the NULL napi->dev ptr
4578 	 */
4579 	napi_complete_done(napi, 0);
4580 	return work_done;
4581 }
4582 
4583 void __init mptcp_proto_init(void)
4584 {
4585 	struct mptcp_delegated_action *delegated;
4586 	int cpu;
4587 
4588 	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
4589 
4590 	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
4591 		panic("Failed to allocate MPTCP pcpu counter\n");
4592 
4593 	mptcp_napi_dev = alloc_netdev_dummy(0);
4594 	if (!mptcp_napi_dev)
4595 		panic("Failed to allocate MPTCP dummy netdev\n");
4596 	for_each_possible_cpu(cpu) {
4597 		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
4598 		INIT_LIST_HEAD(&delegated->head);
4599 		netif_napi_add_tx(mptcp_napi_dev, &delegated->napi,
4600 				  mptcp_napi_poll);
4601 		napi_enable(&delegated->napi);
4602 	}
4603 
4604 	mptcp_subflow_init();
4605 	mptcp_pm_init();
4606 	mptcp_sched_init();
4607 	mptcp_token_init();
4608 
4609 	if (proto_register(&mptcp_prot, 1) != 0)
4610 		panic("Failed to register MPTCP proto.\n");
4611 
4612 	inet_register_protosw(&mptcp_protosw);
4613 
4614 	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
4615 }
4616 
4617 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4618 static const struct proto_ops mptcp_v6_stream_ops = {
4619 	.family		   = PF_INET6,
4620 	.owner		   = THIS_MODULE,
4621 	.release	   = inet6_release,
4622 	.bind		   = mptcp_bind,
4623 	.connect	   = inet_stream_connect,
4624 	.socketpair	   = sock_no_socketpair,
4625 	.accept		   = mptcp_stream_accept,
4626 	.getname	   = inet6_getname,
4627 	.poll		   = mptcp_poll,
4628 	.ioctl		   = inet6_ioctl,
4629 	.gettstamp	   = sock_gettstamp,
4630 	.listen		   = mptcp_listen,
4631 	.shutdown	   = inet_shutdown,
4632 	.setsockopt	   = sock_common_setsockopt,
4633 	.getsockopt	   = sock_common_getsockopt,
4634 	.sendmsg	   = inet6_sendmsg,
4635 	.recvmsg	   = inet6_recvmsg,
4636 	.mmap		   = sock_no_mmap,
4637 #ifdef CONFIG_COMPAT
4638 	.compat_ioctl	   = inet6_compat_ioctl,
4639 #endif
4640 	.set_rcvlowat	   = mptcp_set_rcvlowat,
4641 	.read_sock	   = mptcp_read_sock,
4642 	.splice_read	   = mptcp_splice_read,
4643 };
4644 
4645 static struct proto mptcp_v6_prot;
4646 
4647 static struct inet_protosw mptcp_v6_protosw = {
4648 	.type		= SOCK_STREAM,
4649 	.protocol	= IPPROTO_MPTCP,
4650 	.prot		= &mptcp_v6_prot,
4651 	.ops		= &mptcp_v6_stream_ops,
4652 	.flags		= INET_PROTOSW_ICSK,
4653 };
4654 
4655 int __init mptcp_proto_v6_init(void)
4656 {
4657 	int err;
4658 
4659 	mptcp_v6_prot = mptcp_prot;
4660 	strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
4661 	mptcp_v6_prot.slab = NULL;
4662 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
4663 	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);
4664 
4665 	err = proto_register(&mptcp_v6_prot, 1);
4666 	if (err)
4667 		return err;
4668 
4669 	err = inet6_register_protosw(&mptcp_v6_protosw);
4670 	if (err)
4671 		proto_unregister(&mptcp_v6_prot);
4672 
4673 	return err;
4674 }
4675 #endif
4676