xref: /linux/net/mptcp/protocol.c (revision dfecb0c5af3b07ebfa84be63a7a21bfc9e29a872)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
14 #include <net/aligned_data.h>
15 #include <net/rps.h>
16 #include <net/sock.h>
17 #include <net/inet_common.h>
18 #include <net/inet_hashtables.h>
19 #include <net/protocol.h>
20 #include <net/tcp_states.h>
21 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
22 #include <net/transp_v6.h>
23 #endif
24 #include <net/mptcp.h>
25 #include <net/hotdata.h>
26 #include <net/xfrm.h>
27 #include <asm/ioctls.h>
28 #include "protocol.h"
29 #include "mib.h"
30 
31 static unsigned int mptcp_inq_hint(const struct sock *sk);
32 
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/mptcp.h>
35 
36 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
37 struct mptcp6_sock {
38 	struct mptcp_sock msk;
39 	struct ipv6_pinfo np;
40 };
41 #endif
42 
43 enum {
44 	MPTCP_CMSG_TS = BIT(0),
45 	MPTCP_CMSG_INQ = BIT(1),
46 };
47 
48 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
49 
50 static void __mptcp_destroy_sock(struct sock *sk);
51 static void mptcp_check_send_data_fin(struct sock *sk);
52 
53 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
54 	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
55 };
56 static struct net_device *mptcp_napi_dev;
57 
58 /* Returns end sequence number of the receiver's advertised window */
59 static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
60 {
61 	return READ_ONCE(msk->wnd_end);
62 }
63 
64 static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
65 {
66 	unsigned short family = READ_ONCE(sk->sk_family);
67 
68 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
69 	if (family == AF_INET6)
70 		return &inet6_stream_ops;
71 #endif
72 	WARN_ON_ONCE(family != AF_INET);
73 	return &inet_stream_ops;
74 }
75 
76 bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
77 {
78 	struct net *net = sock_net((struct sock *)msk);
79 
80 	if (__mptcp_check_fallback(msk))
81 		return true;
82 
83 	/* The caller possibly is not holding the msk socket lock, but
84 	 * in the fallback case only the current subflow is touching
85 	 * the OoO queue.
86 	 */
87 	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
88 		return false;
89 
90 	spin_lock_bh(&msk->fallback_lock);
91 	if (!msk->allow_infinite_fallback) {
92 		spin_unlock_bh(&msk->fallback_lock);
93 		return false;
94 	}
95 
96 	msk->allow_subflows = false;
97 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
98 	__MPTCP_INC_STATS(net, fb_mib);
99 	spin_unlock_bh(&msk->fallback_lock);
100 	return true;
101 }
102 
103 static int __mptcp_socket_create(struct mptcp_sock *msk)
104 {
105 	struct mptcp_subflow_context *subflow;
106 	struct sock *sk = (struct sock *)msk;
107 	struct socket *ssock;
108 	int err;
109 
110 	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
111 	if (err)
112 		return err;
113 
114 	msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
115 	WRITE_ONCE(msk->first, ssock->sk);
116 	subflow = mptcp_subflow_ctx(ssock->sk);
117 	list_add(&subflow->node, &msk->conn_list);
118 	sock_hold(ssock->sk);
119 	subflow->request_mptcp = 1;
120 	subflow->subflow_id = msk->subflow_id++;
121 
122 	/* This is the first subflow, always with id 0 */
123 	WRITE_ONCE(subflow->local_id, 0);
124 	mptcp_sock_graft(msk->first, sk->sk_socket);
125 	iput(SOCK_INODE(ssock));
126 
127 	return 0;
128 }
129 
130 /* If the MPC handshake is not started, returns the first subflow,
131  * eventually allocating it.
132  */
133 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
134 {
135 	struct sock *sk = (struct sock *)msk;
136 	int ret;
137 
138 	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
139 		return ERR_PTR(-EINVAL);
140 
141 	if (!msk->first) {
142 		ret = __mptcp_socket_create(msk);
143 		if (ret)
144 			return ERR_PTR(ret);
145 	}
146 
147 	return msk->first;
148 }
149 
150 static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
151 {
152 	sk_drops_skbadd(sk, skb);
153 	__kfree_skb(skb);
154 }
155 
156 static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
157 				 struct sk_buff *from, bool *fragstolen,
158 				 int *delta)
159 {
160 	int limit = READ_ONCE(sk->sk_rcvbuf);
161 
162 	if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
163 	    MPTCP_SKB_CB(from)->offset ||
164 	    ((to->len + from->len) > (limit >> 3)) ||
165 	    !skb_try_coalesce(to, from, fragstolen, delta))
166 		return false;
167 
168 	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
169 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
170 		 to->len, MPTCP_SKB_CB(from)->end_seq);
171 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
172 	return true;
173 }
174 
175 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
176 			       struct sk_buff *from)
177 {
178 	bool fragstolen;
179 	int delta;
180 
181 	if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
182 		return false;
183 
184 	/* note the fwd memory can reach a negative value after accounting
185 	 * for the delta, but the later skb free will restore a non
186 	 * negative one
187 	 */
188 	atomic_add(delta, &sk->sk_rmem_alloc);
189 	sk_mem_charge(sk, delta);
190 	kfree_skb_partial(from, fragstolen);
191 
192 	return true;
193 }
194 
195 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
196 				   struct sk_buff *from)
197 {
198 	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
199 		return false;
200 
201 	return mptcp_try_coalesce((struct sock *)msk, to, from);
202 }
203 
204 /* "inspired" by tcp_rcvbuf_grow(), main difference:
205  * - mptcp does not maintain a msk-level window clamp
206  * - returns true when  the receive buffer is actually updated
207  */
208 static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
209 {
210 	struct mptcp_sock *msk = mptcp_sk(sk);
211 	const struct net *net = sock_net(sk);
212 	u32 rcvwin, rcvbuf, cap, oldval;
213 	u64 grow;
214 
215 	oldval = msk->rcvq_space.space;
216 	msk->rcvq_space.space = newval;
217 	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
218 	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
219 		return false;
220 
221 	/* DRS is always one RTT late. */
222 	rcvwin = newval << 1;
223 
224 	/* slow start: allow the sender to double its rate. */
225 	grow = (u64)rcvwin * (newval - oldval);
226 	do_div(grow, oldval);
227 	rcvwin += grow << 1;
228 
229 	cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
230 
231 	rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
232 	if (rcvbuf > sk->sk_rcvbuf) {
233 		WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
234 		return true;
235 	}
236 	return false;
237 }
238 
239 /* "inspired" by tcp_data_queue_ofo(), main differences:
240  * - use mptcp seqs
241  * - don't cope with sacks
242  */
243 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
244 {
245 	struct sock *sk = (struct sock *)msk;
246 	struct rb_node **p, *parent;
247 	u64 seq, end_seq, max_seq;
248 	struct sk_buff *skb1;
249 
250 	seq = MPTCP_SKB_CB(skb)->map_seq;
251 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
252 	max_seq = atomic64_read(&msk->rcv_wnd_sent);
253 
254 	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
255 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
256 	if (after64(end_seq, max_seq)) {
257 		/* out of window */
258 		mptcp_drop(sk, skb);
259 		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
260 			 (unsigned long long)end_seq - (unsigned long)max_seq,
261 			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
262 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
263 		return;
264 	}
265 
266 	p = &msk->out_of_order_queue.rb_node;
267 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
268 	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
269 		rb_link_node(&skb->rbnode, NULL, p);
270 		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
271 		msk->ooo_last_skb = skb;
272 		goto end;
273 	}
274 
275 	/* with 2 subflows, adding at end of ooo queue is quite likely
276 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
277 	 */
278 	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
279 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
280 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
281 		return;
282 	}
283 
284 	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
285 	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
286 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
287 		parent = &msk->ooo_last_skb->rbnode;
288 		p = &parent->rb_right;
289 		goto insert;
290 	}
291 
292 	/* Find place to insert this segment. Handle overlaps on the way. */
293 	parent = NULL;
294 	while (*p) {
295 		parent = *p;
296 		skb1 = rb_to_skb(parent);
297 		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
298 			p = &parent->rb_left;
299 			continue;
300 		}
301 		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
302 			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
303 				/* All the bits are present. Drop. */
304 				mptcp_drop(sk, skb);
305 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
306 				return;
307 			}
308 			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
309 				/* partial overlap:
310 				 *     |     skb      |
311 				 *  |     skb1    |
312 				 * continue traversing
313 				 */
314 			} else {
315 				/* skb's seq == skb1's seq and skb covers skb1.
316 				 * Replace skb1 with skb.
317 				 */
318 				rb_replace_node(&skb1->rbnode, &skb->rbnode,
319 						&msk->out_of_order_queue);
320 				mptcp_drop(sk, skb1);
321 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
322 				goto merge_right;
323 			}
324 		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
325 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
326 			return;
327 		}
328 		p = &parent->rb_right;
329 	}
330 
331 insert:
332 	/* Insert segment into RB tree. */
333 	rb_link_node(&skb->rbnode, parent, p);
334 	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
335 
336 merge_right:
337 	/* Remove other segments covered by skb. */
338 	while ((skb1 = skb_rb_next(skb)) != NULL) {
339 		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
340 			break;
341 		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
342 		mptcp_drop(sk, skb1);
343 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
344 	}
345 	/* If there is no skb after us, we are the last_skb ! */
346 	if (!skb1)
347 		msk->ooo_last_skb = skb;
348 
349 end:
350 	skb_condense(skb);
351 	skb_set_owner_r(skb, sk);
352 }
353 
354 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
355 			   int copy_len)
356 {
357 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
358 	bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
359 
360 	/* the skb map_seq accounts for the skb offset:
361 	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
362 	 * value
363 	 */
364 	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
365 	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
366 	MPTCP_SKB_CB(skb)->offset = offset;
367 	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
368 	MPTCP_SKB_CB(skb)->cant_coalesce = 0;
369 
370 	__skb_unlink(skb, &ssk->sk_receive_queue);
371 
372 	skb_ext_reset(skb);
373 	skb_dst_drop(skb);
374 }
375 
376 static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
377 {
378 	u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
379 	struct mptcp_sock *msk = mptcp_sk(sk);
380 	struct sk_buff *tail;
381 
382 	mptcp_borrow_fwdmem(sk, skb);
383 
384 	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
385 		/* in sequence */
386 		msk->bytes_received += copy_len;
387 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
388 		tail = skb_peek_tail(&sk->sk_receive_queue);
389 		if (tail && mptcp_try_coalesce(sk, tail, skb))
390 			return true;
391 
392 		skb_set_owner_r(skb, sk);
393 		__skb_queue_tail(&sk->sk_receive_queue, skb);
394 		return true;
395 	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
396 		mptcp_data_queue_ofo(msk, skb);
397 		return false;
398 	}
399 
400 	/* old data, keep it simple and drop the whole pkt, sender
401 	 * will retransmit as needed, if needed.
402 	 */
403 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
404 	mptcp_drop(sk, skb);
405 	return false;
406 }
407 
408 static void mptcp_stop_rtx_timer(struct sock *sk)
409 {
410 	sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
411 	mptcp_sk(sk)->timer_ival = 0;
412 }
413 
414 static void mptcp_close_wake_up(struct sock *sk)
415 {
416 	if (sock_flag(sk, SOCK_DEAD))
417 		return;
418 
419 	sk->sk_state_change(sk);
420 	if (sk->sk_shutdown == SHUTDOWN_MASK ||
421 	    sk->sk_state == TCP_CLOSE)
422 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
423 	else
424 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
425 }
426 
427 static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
428 {
429 	struct mptcp_subflow_context *subflow;
430 
431 	mptcp_for_each_subflow(msk, subflow) {
432 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
433 		bool slow;
434 
435 		slow = lock_sock_fast(ssk);
436 		tcp_shutdown(ssk, SEND_SHUTDOWN);
437 		unlock_sock_fast(ssk, slow);
438 	}
439 }
440 
441 /* called under the msk socket lock */
442 static bool mptcp_pending_data_fin_ack(struct sock *sk)
443 {
444 	struct mptcp_sock *msk = mptcp_sk(sk);
445 
446 	return ((1 << sk->sk_state) &
447 		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
448 	       msk->write_seq == READ_ONCE(msk->snd_una);
449 }
450 
451 static void mptcp_check_data_fin_ack(struct sock *sk)
452 {
453 	struct mptcp_sock *msk = mptcp_sk(sk);
454 
455 	/* Look for an acknowledged DATA_FIN */
456 	if (mptcp_pending_data_fin_ack(sk)) {
457 		WRITE_ONCE(msk->snd_data_fin_enable, 0);
458 
459 		switch (sk->sk_state) {
460 		case TCP_FIN_WAIT1:
461 			mptcp_set_state(sk, TCP_FIN_WAIT2);
462 			break;
463 		case TCP_CLOSING:
464 		case TCP_LAST_ACK:
465 			mptcp_shutdown_subflows(msk);
466 			mptcp_set_state(sk, TCP_CLOSE);
467 			break;
468 		}
469 
470 		mptcp_close_wake_up(sk);
471 	}
472 }
473 
474 /* can be called with no lock acquired */
475 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
476 {
477 	struct mptcp_sock *msk = mptcp_sk(sk);
478 
479 	if (READ_ONCE(msk->rcv_data_fin) &&
480 	    ((1 << inet_sk_state_load(sk)) &
481 	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
482 		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
483 
484 		if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
485 			if (seq)
486 				*seq = rcv_data_fin_seq;
487 
488 			return true;
489 		}
490 	}
491 
492 	return false;
493 }
494 
495 static void mptcp_set_datafin_timeout(struct sock *sk)
496 {
497 	struct inet_connection_sock *icsk = inet_csk(sk);
498 	u32 retransmits;
499 
500 	retransmits = min_t(u32, icsk->icsk_retransmits,
501 			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));
502 
503 	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
504 }
505 
506 static void __mptcp_set_timeout(struct sock *sk, long tout)
507 {
508 	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
509 }
510 
511 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
512 {
513 	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
514 
515 	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
516 	       tcp_timeout_expires(ssk) - jiffies : 0;
517 }
518 
519 static void mptcp_set_timeout(struct sock *sk)
520 {
521 	struct mptcp_subflow_context *subflow;
522 	long tout = 0;
523 
524 	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
525 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
526 	__mptcp_set_timeout(sk, tout);
527 }
528 
529 static inline bool tcp_can_send_ack(const struct sock *ssk)
530 {
531 	return !((1 << inet_sk_state_load(ssk)) &
532 	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
533 }
534 
535 void __mptcp_subflow_send_ack(struct sock *ssk)
536 {
537 	if (tcp_can_send_ack(ssk))
538 		tcp_send_ack(ssk);
539 }
540 
541 static void mptcp_subflow_send_ack(struct sock *ssk)
542 {
543 	bool slow;
544 
545 	slow = lock_sock_fast(ssk);
546 	__mptcp_subflow_send_ack(ssk);
547 	unlock_sock_fast(ssk, slow);
548 }
549 
550 static void mptcp_send_ack(struct mptcp_sock *msk)
551 {
552 	struct mptcp_subflow_context *subflow;
553 
554 	mptcp_for_each_subflow(msk, subflow)
555 		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
556 }
557 
558 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
559 {
560 	bool slow;
561 
562 	slow = lock_sock_fast(ssk);
563 	if (tcp_can_send_ack(ssk))
564 		tcp_cleanup_rbuf(ssk, copied);
565 	unlock_sock_fast(ssk, slow);
566 }
567 
568 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
569 {
570 	const struct inet_connection_sock *icsk = inet_csk(ssk);
571 	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
572 	const struct tcp_sock *tp = tcp_sk(ssk);
573 
574 	return (ack_pending & ICSK_ACK_SCHED) &&
575 		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
576 		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
577 		 (rx_empty && ack_pending &
578 			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
579 }
580 
581 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
582 {
583 	int old_space = READ_ONCE(msk->old_wspace);
584 	struct mptcp_subflow_context *subflow;
585 	struct sock *sk = (struct sock *)msk;
586 	int space =  __mptcp_space(sk);
587 	bool cleanup, rx_empty;
588 
589 	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
590 	rx_empty = !sk_rmem_alloc_get(sk) && copied;
591 
592 	mptcp_for_each_subflow(msk, subflow) {
593 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
594 
595 		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
596 			mptcp_subflow_cleanup_rbuf(ssk, copied);
597 	}
598 }
599 
600 static void mptcp_check_data_fin(struct sock *sk)
601 {
602 	struct mptcp_sock *msk = mptcp_sk(sk);
603 	u64 rcv_data_fin_seq;
604 
605 	/* Need to ack a DATA_FIN received from a peer while this side
606 	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
607 	 * msk->rcv_data_fin was set when parsing the incoming options
608 	 * at the subflow level and the msk lock was not held, so this
609 	 * is the first opportunity to act on the DATA_FIN and change
610 	 * the msk state.
611 	 *
612 	 * If we are caught up to the sequence number of the incoming
613 	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
614 	 * not caught up, do nothing and let the recv code send DATA_ACK
615 	 * when catching up.
616 	 */
617 
618 	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
619 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
620 		WRITE_ONCE(msk->rcv_data_fin, 0);
621 
622 		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
623 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
624 
625 		switch (sk->sk_state) {
626 		case TCP_ESTABLISHED:
627 			mptcp_set_state(sk, TCP_CLOSE_WAIT);
628 			break;
629 		case TCP_FIN_WAIT1:
630 			mptcp_set_state(sk, TCP_CLOSING);
631 			break;
632 		case TCP_FIN_WAIT2:
633 			mptcp_shutdown_subflows(msk);
634 			mptcp_set_state(sk, TCP_CLOSE);
635 			break;
636 		default:
637 			/* Other states not expected */
638 			WARN_ON_ONCE(1);
639 			break;
640 		}
641 
642 		if (!__mptcp_check_fallback(msk))
643 			mptcp_send_ack(msk);
644 		mptcp_close_wake_up(sk);
645 	}
646 }
647 
648 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
649 {
650 	if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
651 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
652 		mptcp_subflow_reset(ssk);
653 	}
654 }
655 
656 static void __mptcp_add_backlog(struct sock *sk,
657 				struct mptcp_subflow_context *subflow,
658 				struct sk_buff *skb)
659 {
660 	struct mptcp_sock *msk = mptcp_sk(sk);
661 	struct sk_buff *tail = NULL;
662 	struct sock *ssk = skb->sk;
663 	bool fragstolen;
664 	int delta;
665 
666 	if (unlikely(sk->sk_state == TCP_CLOSE)) {
667 		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
668 		return;
669 	}
670 
671 	/* Try to coalesce with the last skb in our backlog */
672 	if (!list_empty(&msk->backlog_list))
673 		tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
674 
675 	if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
676 	    ssk == tail->sk &&
677 	    __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
678 		skb->truesize -= delta;
679 		kfree_skb_partial(skb, fragstolen);
680 		__mptcp_subflow_lend_fwdmem(subflow, delta);
681 		goto account;
682 	}
683 
684 	list_add_tail(&skb->list, &msk->backlog_list);
685 	mptcp_subflow_lend_fwdmem(subflow, skb);
686 	delta = skb->truesize;
687 
688 account:
689 	WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
690 
691 	/* Possibly not accept()ed yet, keep track of memory not CG
692 	 * accounted, mptcp_graft_subflows() will handle it.
693 	 */
694 	if (!mem_cgroup_from_sk(ssk))
695 		msk->backlog_unaccounted += delta;
696 }
697 
698 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
699 					   struct sock *ssk, bool own_msk)
700 {
701 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
702 	struct sock *sk = (struct sock *)msk;
703 	bool more_data_avail;
704 	struct tcp_sock *tp;
705 	bool ret = false;
706 
707 	pr_debug("msk=%p ssk=%p\n", msk, ssk);
708 	tp = tcp_sk(ssk);
709 	do {
710 		u32 map_remaining, offset;
711 		u32 seq = tp->copied_seq;
712 		struct sk_buff *skb;
713 		bool fin;
714 
715 		/* try to move as much data as available */
716 		map_remaining = subflow->map_data_len -
717 				mptcp_subflow_get_map_offset(subflow);
718 
719 		skb = skb_peek(&ssk->sk_receive_queue);
720 		if (unlikely(!skb))
721 			break;
722 
723 		if (__mptcp_check_fallback(msk)) {
724 			/* Under fallback skbs have no MPTCP extension and TCP could
725 			 * collapse them between the dummy map creation and the
726 			 * current dequeue. Be sure to adjust the map size.
727 			 */
728 			map_remaining = skb->len;
729 			subflow->map_data_len = skb->len;
730 		}
731 
732 		offset = seq - TCP_SKB_CB(skb)->seq;
733 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
734 		if (fin)
735 			seq++;
736 
737 		if (offset < skb->len) {
738 			size_t len = skb->len - offset;
739 
740 			mptcp_init_skb(ssk, skb, offset, len);
741 
742 			if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
743 				mptcp_subflow_lend_fwdmem(subflow, skb);
744 				ret |= __mptcp_move_skb(sk, skb);
745 			} else {
746 				__mptcp_add_backlog(sk, subflow, skb);
747 			}
748 			seq += len;
749 
750 			if (unlikely(map_remaining < len)) {
751 				DEBUG_NET_WARN_ON_ONCE(1);
752 				mptcp_dss_corruption(msk, ssk);
753 			}
754 		} else {
755 			if (unlikely(!fin)) {
756 				DEBUG_NET_WARN_ON_ONCE(1);
757 				mptcp_dss_corruption(msk, ssk);
758 			}
759 
760 			sk_eat_skb(ssk, skb);
761 		}
762 
763 		WRITE_ONCE(tp->copied_seq, seq);
764 		more_data_avail = mptcp_subflow_data_available(ssk);
765 
766 	} while (more_data_avail);
767 
768 	if (ret)
769 		msk->last_data_recv = tcp_jiffies32;
770 	return ret;
771 }
772 
773 static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
774 {
775 	struct sock *sk = (struct sock *)msk;
776 	struct sk_buff *skb, *tail;
777 	bool moved = false;
778 	struct rb_node *p;
779 	u64 end_seq;
780 
781 	p = rb_first(&msk->out_of_order_queue);
782 	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
783 	while (p) {
784 		skb = rb_to_skb(p);
785 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
786 			break;
787 
788 		p = rb_next(p);
789 		rb_erase(&skb->rbnode, &msk->out_of_order_queue);
790 
791 		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
792 				      msk->ack_seq))) {
793 			mptcp_drop(sk, skb);
794 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
795 			continue;
796 		}
797 
798 		end_seq = MPTCP_SKB_CB(skb)->end_seq;
799 		tail = skb_peek_tail(&sk->sk_receive_queue);
800 		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
801 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
802 
803 			/* skip overlapping data, if any */
804 			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
805 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
806 				 delta);
807 			MPTCP_SKB_CB(skb)->offset += delta;
808 			MPTCP_SKB_CB(skb)->map_seq += delta;
809 			__skb_queue_tail(&sk->sk_receive_queue, skb);
810 		}
811 		msk->bytes_received += end_seq - msk->ack_seq;
812 		WRITE_ONCE(msk->ack_seq, end_seq);
813 		moved = true;
814 	}
815 	return moved;
816 }
817 
818 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
819 {
820 	int ssk_state;
821 	int err;
822 
823 	/* only propagate errors on fallen-back sockets or
824 	 * on MPC connect
825 	 */
826 	if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
827 		return false;
828 
829 	err = sock_error(ssk);
830 	if (!err)
831 		return false;
832 
833 	/* We need to propagate only transition to CLOSE state.
834 	 * Orphaned socket will see such state change via
835 	 * subflow_sched_work_if_closed() and that path will properly
836 	 * destroy the msk as needed.
837 	 */
838 	ssk_state = inet_sk_state_load(ssk);
839 	if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
840 		mptcp_set_state(sk, ssk_state);
841 	WRITE_ONCE(sk->sk_err, -err);
842 
843 	/* This barrier is coupled with smp_rmb() in mptcp_poll() */
844 	smp_wmb();
845 	sk_error_report(sk);
846 	return true;
847 }
848 
849 void __mptcp_error_report(struct sock *sk)
850 {
851 	struct mptcp_subflow_context *subflow;
852 	struct mptcp_sock *msk = mptcp_sk(sk);
853 
854 	mptcp_for_each_subflow(msk, subflow)
855 		if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
856 			break;
857 }
858 
859 /* In most cases we will be able to lock the mptcp socket.  If its already
860  * owned, we need to defer to the work queue to avoid ABBA deadlock.
861  */
862 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
863 {
864 	struct sock *sk = (struct sock *)msk;
865 	bool moved;
866 
867 	moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
868 	__mptcp_ofo_queue(msk);
869 	if (unlikely(ssk->sk_err))
870 		__mptcp_subflow_error_report(sk, ssk);
871 
872 	/* If the moves have caught up with the DATA_FIN sequence number
873 	 * it's time to ack the DATA_FIN and change socket state, but
874 	 * this is not a good place to change state. Let the workqueue
875 	 * do it.
876 	 */
877 	if (mptcp_pending_data_fin(sk, NULL))
878 		mptcp_schedule_work(sk);
879 	return moved;
880 }
881 
882 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
883 {
884 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
885 	struct mptcp_sock *msk = mptcp_sk(sk);
886 
887 	/* The peer can send data while we are shutting down this
888 	 * subflow at subflow destruction time, but we must avoid enqueuing
889 	 * more data to the msk receive queue
890 	 */
891 	if (unlikely(subflow->closing))
892 		return;
893 
894 	mptcp_data_lock(sk);
895 	if (!sock_owned_by_user(sk)) {
896 		/* Wake-up the reader only for in-sequence data */
897 		if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
898 			sk->sk_data_ready(sk);
899 	} else {
900 		__mptcp_move_skbs_from_subflow(msk, ssk, false);
901 	}
902 	mptcp_data_unlock(sk);
903 }
904 
905 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
906 {
907 	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
908 	msk->allow_infinite_fallback = false;
909 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
910 }
911 
912 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
913 {
914 	struct sock *sk = (struct sock *)msk;
915 
916 	if (sk->sk_state != TCP_ESTABLISHED)
917 		return false;
918 
919 	spin_lock_bh(&msk->fallback_lock);
920 	if (!msk->allow_subflows) {
921 		spin_unlock_bh(&msk->fallback_lock);
922 		return false;
923 	}
924 	mptcp_subflow_joined(msk, ssk);
925 	spin_unlock_bh(&msk->fallback_lock);
926 
927 	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
928 	mptcp_sockopt_sync_locked(msk, ssk);
929 	mptcp_stop_tout_timer(sk);
930 	__mptcp_propagate_sndbuf(sk, ssk);
931 	return true;
932 }
933 
934 static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
935 {
936 	struct mptcp_subflow_context *tmp, *subflow;
937 	struct mptcp_sock *msk = mptcp_sk(sk);
938 
939 	list_for_each_entry_safe(subflow, tmp, join_list, node) {
940 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
941 		bool slow = lock_sock_fast(ssk);
942 
943 		list_move_tail(&subflow->node, &msk->conn_list);
944 		if (!__mptcp_finish_join(msk, ssk))
945 			mptcp_subflow_reset(ssk);
946 		unlock_sock_fast(ssk, slow);
947 	}
948 }
949 
950 static bool mptcp_rtx_timer_pending(struct sock *sk)
951 {
952 	return timer_pending(&sk->mptcp_retransmit_timer);
953 }
954 
955 static void mptcp_reset_rtx_timer(struct sock *sk)
956 {
957 	unsigned long tout;
958 
959 	/* prevent rescheduling on close */
960 	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
961 		return;
962 
963 	tout = mptcp_sk(sk)->timer_ival;
964 	sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
965 }
966 
967 bool mptcp_schedule_work(struct sock *sk)
968 {
969 	if (inet_sk_state_load(sk) == TCP_CLOSE)
970 		return false;
971 
972 	/* Get a reference on this socket, mptcp_worker() will release it.
973 	 * As mptcp_worker() might complete before us, we can not avoid
974 	 * a sock_hold()/sock_put() if schedule_work() returns false.
975 	 */
976 	sock_hold(sk);
977 
978 	if (schedule_work(&mptcp_sk(sk)->work))
979 		return true;
980 
981 	sock_put(sk);
982 	return false;
983 }
984 
985 static bool mptcp_skb_can_collapse_to(u64 write_seq,
986 				      const struct sk_buff *skb,
987 				      const struct mptcp_ext *mpext)
988 {
989 	if (!tcp_skb_can_collapse_to(skb))
990 		return false;
991 
992 	/* can collapse only if MPTCP level sequence is in order and this
993 	 * mapping has not been xmitted yet
994 	 */
995 	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
996 	       !mpext->frozen;
997 }
998 
999 /* we can append data to the given data frag if:
1000  * - there is space available in the backing page_frag
1001  * - the data frag tail matches the current page_frag free offset
1002  * - the data frag end sequence number matches the current write seq
1003  */
1004 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
1005 				       const struct page_frag *pfrag,
1006 				       const struct mptcp_data_frag *df)
1007 {
1008 	return df && !df->eor &&
1009 		pfrag->page == df->page &&
1010 		pfrag->size - pfrag->offset > 0 &&
1011 		pfrag->offset == (df->offset + df->data_len) &&
1012 		df->data_seq + df->data_len == msk->write_seq;
1013 }
1014 
1015 static void dfrag_uncharge(struct sock *sk, int len)
1016 {
1017 	sk_mem_uncharge(sk, len);
1018 	sk_wmem_queued_add(sk, -len);
1019 }
1020 
1021 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
1022 {
1023 	int len = dfrag->data_len + dfrag->overhead;
1024 
1025 	list_del(&dfrag->list);
1026 	dfrag_uncharge(sk, len);
1027 	put_page(dfrag->page);
1028 }
1029 
1030 /* called under both the msk socket lock and the data lock */
1031 static void __mptcp_clean_una(struct sock *sk)
1032 {
1033 	struct mptcp_sock *msk = mptcp_sk(sk);
1034 	struct mptcp_data_frag *dtmp, *dfrag;
1035 	u64 snd_una;
1036 
1037 	snd_una = msk->snd_una;
1038 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
1039 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
1040 			break;
1041 
1042 		if (unlikely(dfrag == msk->first_pending)) {
1043 			/* in recovery mode can see ack after the current snd head */
1044 			if (WARN_ON_ONCE(!msk->recovery))
1045 				break;
1046 
1047 			msk->first_pending = mptcp_send_next(sk);
1048 		}
1049 
1050 		dfrag_clear(sk, dfrag);
1051 	}
1052 
1053 	dfrag = mptcp_rtx_head(sk);
1054 	if (dfrag && after64(snd_una, dfrag->data_seq)) {
1055 		u64 delta = snd_una - dfrag->data_seq;
1056 
1057 		/* prevent wrap around in recovery mode */
1058 		if (unlikely(delta > dfrag->already_sent)) {
1059 			if (WARN_ON_ONCE(!msk->recovery))
1060 				goto out;
1061 			if (WARN_ON_ONCE(delta > dfrag->data_len))
1062 				goto out;
1063 			dfrag->already_sent += delta - dfrag->already_sent;
1064 		}
1065 
1066 		dfrag->data_seq += delta;
1067 		dfrag->offset += delta;
1068 		dfrag->data_len -= delta;
1069 		dfrag->already_sent -= delta;
1070 
1071 		dfrag_uncharge(sk, delta);
1072 	}
1073 
1074 	/* all retransmitted data acked, recovery completed */
1075 	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
1076 		msk->recovery = false;
1077 
1078 out:
1079 	if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
1080 		if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
1081 			mptcp_stop_rtx_timer(sk);
1082 	} else {
1083 		mptcp_reset_rtx_timer(sk);
1084 	}
1085 
1086 	if (mptcp_pending_data_fin_ack(sk))
1087 		mptcp_schedule_work(sk);
1088 }
1089 
1090 static void __mptcp_clean_una_wakeup(struct sock *sk)
1091 {
1092 	lockdep_assert_held_once(&sk->sk_lock.slock);
1093 
1094 	__mptcp_clean_una(sk);
1095 	mptcp_write_space(sk);
1096 }
1097 
1098 static void mptcp_clean_una_wakeup(struct sock *sk)
1099 {
1100 	mptcp_data_lock(sk);
1101 	__mptcp_clean_una_wakeup(sk);
1102 	mptcp_data_unlock(sk);
1103 }
1104 
1105 static void mptcp_enter_memory_pressure(struct sock *sk)
1106 {
1107 	struct mptcp_subflow_context *subflow;
1108 	struct mptcp_sock *msk = mptcp_sk(sk);
1109 	bool first = true;
1110 
1111 	mptcp_for_each_subflow(msk, subflow) {
1112 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1113 
1114 		if (first && !ssk->sk_bypass_prot_mem) {
1115 			tcp_enter_memory_pressure(ssk);
1116 			first = false;
1117 		}
1118 
1119 		sk_stream_moderate_sndbuf(ssk);
1120 	}
1121 	__mptcp_sync_sndbuf(sk);
1122 }
1123 
1124 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1125  * data
1126  */
1127 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1128 {
1129 	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
1130 					pfrag, sk->sk_allocation)))
1131 		return true;
1132 
1133 	mptcp_enter_memory_pressure(sk);
1134 	return false;
1135 }
1136 
1137 static struct mptcp_data_frag *
1138 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
1139 		      int orig_offset)
1140 {
1141 	int offset = ALIGN(orig_offset, sizeof(long));
1142 	struct mptcp_data_frag *dfrag;
1143 
1144 	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
1145 	dfrag->data_len = 0;
1146 	dfrag->data_seq = msk->write_seq;
1147 	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
1148 	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
1149 	dfrag->already_sent = 0;
1150 	dfrag->page = pfrag->page;
1151 	dfrag->eor = 0;
1152 
1153 	return dfrag;
1154 }
1155 
1156 struct mptcp_sendmsg_info {
1157 	int mss_now;
1158 	int size_goal;
1159 	u16 limit;
1160 	u16 sent;
1161 	unsigned int flags;
1162 	bool data_lock_held;
1163 };
1164 
1165 static size_t mptcp_check_allowed_size(const struct mptcp_sock *msk,
1166 				       struct sock *ssk, u64 data_seq,
1167 				       size_t avail_size)
1168 {
1169 	u64 window_end = mptcp_wnd_end(msk);
1170 	u64 mptcp_snd_wnd;
1171 
1172 	if (__mptcp_check_fallback(msk))
1173 		return avail_size;
1174 
1175 	mptcp_snd_wnd = window_end - data_seq;
1176 	avail_size = min(mptcp_snd_wnd, avail_size);
1177 
1178 	if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
1179 		tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
1180 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
1181 	}
1182 
1183 	return avail_size;
1184 }
1185 
1186 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
1187 {
1188 	struct skb_ext *mpext = __skb_ext_alloc(gfp);
1189 
1190 	if (!mpext)
1191 		return false;
1192 	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
1193 	return true;
1194 }
1195 
1196 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
1197 {
1198 	struct sk_buff *skb;
1199 
1200 	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
1201 	if (likely(skb)) {
1202 		if (likely(__mptcp_add_ext(skb, gfp))) {
1203 			skb_reserve(skb, MAX_TCP_HEADER);
1204 			skb->ip_summed = CHECKSUM_PARTIAL;
1205 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
1206 			return skb;
1207 		}
1208 		__kfree_skb(skb);
1209 	} else {
1210 		mptcp_enter_memory_pressure(sk);
1211 	}
1212 	return NULL;
1213 }
1214 
1215 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1216 {
1217 	struct sk_buff *skb;
1218 
1219 	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
1220 	if (!skb)
1221 		return NULL;
1222 
1223 	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1224 		tcp_skb_entail(ssk, skb);
1225 		return skb;
1226 	}
1227 	tcp_skb_tsorted_anchor_cleanup(skb);
1228 	kfree_skb(skb);
1229 	return NULL;
1230 }
1231 
1232 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
1233 {
1234 	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
1235 
1236 	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
1237 }
1238 
1239 /* note: this always recompute the csum on the whole skb, even
1240  * if we just appended a single frag. More status info needed
1241  */
1242 static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
1243 {
1244 	struct mptcp_ext *mpext = mptcp_get_ext(skb);
1245 	__wsum csum = ~csum_unfold(mpext->csum);
1246 	int offset = skb->len - added;
1247 
1248 	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
1249 }
1250 
1251 static void mptcp_update_infinite_map(struct mptcp_sock *msk,
1252 				      struct sock *ssk,
1253 				      struct mptcp_ext *mpext)
1254 {
1255 	if (!mpext)
1256 		return;
1257 
1258 	mpext->infinite_map = 1;
1259 	mpext->data_len = 0;
1260 
1261 	if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
1262 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
1263 		mptcp_subflow_reset(ssk);
1264 		return;
1265 	}
1266 
1267 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
1268 }
1269 
1270 #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
1271 
1272 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1273 			      struct mptcp_data_frag *dfrag,
1274 			      struct mptcp_sendmsg_info *info)
1275 {
1276 	u64 data_seq = dfrag->data_seq + info->sent;
1277 	int offset = dfrag->offset + info->sent;
1278 	struct mptcp_sock *msk = mptcp_sk(sk);
1279 	bool zero_window_probe = false;
1280 	struct mptcp_ext *mpext = NULL;
1281 	bool can_coalesce = false;
1282 	bool reuse_skb = true;
1283 	struct sk_buff *skb;
1284 	size_t copy;
1285 	int i;
1286 
1287 	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
1288 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1289 
1290 	if (WARN_ON_ONCE(info->sent > info->limit ||
1291 			 info->limit > dfrag->data_len))
1292 		return 0;
1293 
1294 	if (unlikely(!__tcp_can_send(ssk)))
1295 		return -EAGAIN;
1296 
1297 	/* compute send limit */
1298 	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
1299 		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
1300 	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1301 	copy = info->size_goal;
1302 
1303 	skb = tcp_write_queue_tail(ssk);
1304 	if (skb && copy > skb->len) {
1305 		/* Limit the write to the size available in the
1306 		 * current skb, if any, so that we create at most a new skb.
1307 		 * Explicitly tells TCP internals to avoid collapsing on later
1308 		 * queue management operation, to avoid breaking the ext <->
1309 		 * SSN association set here
1310 		 */
1311 		mpext = mptcp_get_ext(skb);
1312 		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
1313 			TCP_SKB_CB(skb)->eor = 1;
1314 			tcp_mark_push(tcp_sk(ssk), skb);
1315 			goto alloc_skb;
1316 		}
1317 
1318 		i = skb_shinfo(skb)->nr_frags;
1319 		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
1320 		if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
1321 			tcp_mark_push(tcp_sk(ssk), skb);
1322 			goto alloc_skb;
1323 		}
1324 
1325 		copy -= skb->len;
1326 	} else {
1327 alloc_skb:
1328 		skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
1329 		if (!skb)
1330 			return -ENOMEM;
1331 
1332 		i = skb_shinfo(skb)->nr_frags;
1333 		reuse_skb = false;
1334 		mpext = mptcp_get_ext(skb);
1335 	}
1336 
1337 	/* Zero window and all data acked? Probe. */
1338 	copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
1339 	if (copy == 0) {
1340 		u64 snd_una = READ_ONCE(msk->snd_una);
1341 
1342 		/* No need for zero probe if there are any data pending
1343 		 * either at the msk or ssk level; skb is the current write
1344 		 * queue tail and can be empty at this point.
1345 		 */
1346 		if (snd_una != msk->snd_nxt || skb->len ||
1347 		    skb != tcp_send_head(ssk)) {
1348 			tcp_remove_empty_skb(ssk);
1349 			return 0;
1350 		}
1351 
1352 		zero_window_probe = true;
1353 		data_seq = snd_una - 1;
1354 		copy = 1;
1355 	}
1356 
1357 	copy = min_t(size_t, copy, info->limit - info->sent);
1358 	if (!sk_wmem_schedule(ssk, copy)) {
1359 		tcp_remove_empty_skb(ssk);
1360 		return -ENOMEM;
1361 	}
1362 
1363 	if (can_coalesce) {
1364 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1365 	} else {
1366 		get_page(dfrag->page);
1367 		skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
1368 	}
1369 
1370 	skb->len += copy;
1371 	skb->data_len += copy;
1372 	skb->truesize += copy;
1373 	sk_wmem_queued_add(ssk, copy);
1374 	sk_mem_charge(ssk, copy);
1375 	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
1376 	TCP_SKB_CB(skb)->end_seq += copy;
1377 	tcp_skb_pcount_set(skb, 0);
1378 
1379 	/* on skb reuse we just need to update the DSS len */
1380 	if (reuse_skb) {
1381 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1382 		mpext->data_len += copy;
1383 		goto out;
1384 	}
1385 
1386 	memset(mpext, 0, sizeof(*mpext));
1387 	mpext->data_seq = data_seq;
1388 	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1389 	mpext->data_len = copy;
1390 	mpext->use_map = 1;
1391 	mpext->dsn64 = 1;
1392 
1393 	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
1394 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1395 		 mpext->dsn64);
1396 
1397 	if (zero_window_probe) {
1398 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
1399 		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1400 		mpext->frozen = 1;
1401 		if (READ_ONCE(msk->csum_enabled))
1402 			mptcp_update_data_checksum(skb, copy);
1403 		tcp_push_pending_frames(ssk);
1404 		return 0;
1405 	}
1406 out:
1407 	if (READ_ONCE(msk->csum_enabled))
1408 		mptcp_update_data_checksum(skb, copy);
1409 	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
1410 		mptcp_update_infinite_map(msk, ssk, mpext);
1411 	trace_mptcp_sendmsg_frag(mpext);
1412 	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1413 
1414 	/* if this is the last chunk of a dfrag with MSG_EOR set,
1415 	 * mark the skb to prevent coalescing with subsequent data.
1416 	 */
1417 	if (dfrag->eor && info->sent + copy >= dfrag->data_len)
1418 		TCP_SKB_CB(skb)->eor = 1;
1419 
1420 	return copy;
1421 }
1422 
1423 #define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
1424 					 sizeof(struct tcphdr) - \
1425 					 MAX_TCP_OPTION_SPACE - \
1426 					 sizeof(struct ipv6hdr) - \
1427 					 sizeof(struct frag_hdr))
1428 
1429 struct subflow_send_info {
1430 	struct sock *ssk;
1431 	u64 linger_time;
1432 };
1433 
1434 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
1435 {
1436 	if (!subflow->stale)
1437 		return;
1438 
1439 	subflow->stale = 0;
1440 	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
1441 }
1442 
1443 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1444 {
1445 	if (unlikely(subflow->stale)) {
1446 		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
1447 
1448 		if (subflow->stale_rcv_tstamp == rcv_tstamp)
1449 			return false;
1450 
1451 		mptcp_subflow_set_active(subflow);
1452 	}
1453 	return __mptcp_subflow_active(subflow);
1454 }
1455 
1456 #define SSK_MODE_ACTIVE	0
1457 #define SSK_MODE_BACKUP	1
1458 #define SSK_MODE_MAX	2
1459 
1460 /* implement the mptcp packet scheduler;
1461  * returns the subflow that will transmit the next DSS
1462  * additionally updates the rtx timeout
1463  */
1464 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
1465 {
1466 	struct subflow_send_info send_info[SSK_MODE_MAX];
1467 	struct mptcp_subflow_context *subflow;
1468 	struct sock *sk = (struct sock *)msk;
1469 	u32 pace, burst, wmem;
1470 	int i, nr_active = 0;
1471 	struct sock *ssk;
1472 	u64 linger_time;
1473 	long tout = 0;
1474 
1475 	/* pick the subflow with the lower wmem/wspace ratio */
1476 	for (i = 0; i < SSK_MODE_MAX; ++i) {
1477 		send_info[i].ssk = NULL;
1478 		send_info[i].linger_time = -1;
1479 	}
1480 
1481 	mptcp_for_each_subflow(msk, subflow) {
1482 		bool backup = subflow->backup || subflow->request_bkup;
1483 
1484 		trace_mptcp_subflow_get_send(subflow);
1485 		ssk =  mptcp_subflow_tcp_sock(subflow);
1486 		if (!mptcp_subflow_active(subflow))
1487 			continue;
1488 
1489 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
1490 		nr_active += !backup;
1491 		pace = subflow->avg_pacing_rate;
1492 		if (unlikely(!pace)) {
1493 			/* init pacing rate from socket */
1494 			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
1495 			pace = subflow->avg_pacing_rate;
1496 			if (!pace)
1497 				continue;
1498 		}
1499 
1500 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
1501 		if (linger_time < send_info[backup].linger_time) {
1502 			send_info[backup].ssk = ssk;
1503 			send_info[backup].linger_time = linger_time;
1504 		}
1505 	}
1506 	__mptcp_set_timeout(sk, tout);
1507 
1508 	/* pick the best backup if no other subflow is active */
1509 	if (!nr_active)
1510 		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
1511 
1512 	/* According to the blest algorithm, to avoid HoL blocking for the
1513 	 * faster flow, we need to:
1514 	 * - estimate the faster flow linger time
1515 	 * - use the above to estimate the amount of byte transferred
1516 	 *   by the faster flow
1517 	 * - check that the amount of queued data is greater than the above,
1518 	 *   otherwise do not use the picked, slower, subflow
1519 	 * We select the subflow with the shorter estimated time to flush
1520 	 * the queued mem, which basically ensure the above. We just need
1521 	 * to check that subflow has a non empty cwin.
1522 	 */
1523 	ssk = send_info[SSK_MODE_ACTIVE].ssk;
1524 	if (!ssk || !sk_stream_memory_free(ssk))
1525 		return NULL;
1526 
1527 	burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
1528 	wmem = READ_ONCE(ssk->sk_wmem_queued);
1529 	if (!burst)
1530 		return ssk;
1531 
1532 	subflow = mptcp_subflow_ctx(ssk);
1533 	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
1534 					   READ_ONCE(ssk->sk_pacing_rate) * burst,
1535 					   burst + wmem);
1536 	msk->snd_burst = burst;
1537 	return ssk;
1538 }
1539 
1540 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
1541 {
1542 	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1543 	release_sock(ssk);
1544 }
1545 
1546 static void mptcp_update_post_push(struct mptcp_sock *msk,
1547 				   struct mptcp_data_frag *dfrag,
1548 				   u32 sent)
1549 {
1550 	u64 snd_nxt_new = dfrag->data_seq;
1551 
1552 	dfrag->already_sent += sent;
1553 
1554 	msk->snd_burst -= sent;
1555 
1556 	snd_nxt_new += dfrag->already_sent;
1557 
1558 	/* snd_nxt_new can be smaller than snd_nxt in case mptcp
1559 	 * is recovering after a failover. In that event, this re-sends
1560 	 * old segments.
1561 	 *
1562 	 * Thus compute snd_nxt_new candidate based on
1563 	 * the dfrag->data_seq that was sent and the data
1564 	 * that has been handed to the subflow for transmission
1565 	 * and skip update in case it was old dfrag.
1566 	 */
1567 	if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
1568 		msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
1569 		WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
1570 	}
1571 }
1572 
1573 void mptcp_check_and_set_pending(struct sock *sk)
1574 {
1575 	if (mptcp_send_head(sk)) {
1576 		mptcp_data_lock(sk);
1577 		mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
1578 		mptcp_data_unlock(sk);
1579 	}
1580 }
1581 
1582 static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
1583 				  struct mptcp_sendmsg_info *info)
1584 {
1585 	struct mptcp_sock *msk = mptcp_sk(sk);
1586 	struct mptcp_data_frag *dfrag;
1587 	int len, copied = 0, err = 0;
1588 
1589 	while ((dfrag = mptcp_send_head(sk))) {
1590 		info->sent = dfrag->already_sent;
1591 		info->limit = dfrag->data_len;
1592 		len = dfrag->data_len - dfrag->already_sent;
1593 		while (len > 0) {
1594 			int ret = 0;
1595 
1596 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
1597 			if (ret <= 0) {
1598 				err = copied ? : ret;
1599 				goto out;
1600 			}
1601 
1602 			info->sent += ret;
1603 			copied += ret;
1604 			len -= ret;
1605 
1606 			mptcp_update_post_push(msk, dfrag, ret);
1607 		}
1608 		msk->first_pending = mptcp_send_next(sk);
1609 
1610 		if (msk->snd_burst <= 0 ||
1611 		    !sk_stream_memory_free(ssk) ||
1612 		    !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
1613 			err = copied;
1614 			goto out;
1615 		}
1616 		mptcp_set_timeout(sk);
1617 	}
1618 	err = copied;
1619 
1620 out:
1621 	if (err > 0)
1622 		msk->last_data_sent = tcp_jiffies32;
1623 	return err;
1624 }
1625 
1626 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
1627 {
1628 	struct sock *prev_ssk = NULL, *ssk = NULL;
1629 	struct mptcp_sock *msk = mptcp_sk(sk);
1630 	struct mptcp_sendmsg_info info = {
1631 				.flags = flags,
1632 	};
1633 	bool copied = false;
1634 	int push_count = 1;
1635 
1636 	while (mptcp_send_head(sk) && (push_count > 0)) {
1637 		struct mptcp_subflow_context *subflow;
1638 		int ret = 0;
1639 
1640 		if (mptcp_sched_get_send(msk))
1641 			break;
1642 
1643 		push_count = 0;
1644 
1645 		mptcp_for_each_subflow(msk, subflow) {
1646 			if (READ_ONCE(subflow->scheduled)) {
1647 				mptcp_subflow_set_scheduled(subflow, false);
1648 
1649 				prev_ssk = ssk;
1650 				ssk = mptcp_subflow_tcp_sock(subflow);
1651 				if (ssk != prev_ssk) {
1652 					/* First check. If the ssk has changed since
1653 					 * the last round, release prev_ssk
1654 					 */
1655 					if (prev_ssk)
1656 						mptcp_push_release(prev_ssk, &info);
1657 
1658 					/* Need to lock the new subflow only if different
1659 					 * from the previous one, otherwise we are still
1660 					 * helding the relevant lock
1661 					 */
1662 					lock_sock(ssk);
1663 				}
1664 
1665 				push_count++;
1666 
1667 				ret = __subflow_push_pending(sk, ssk, &info);
1668 				if (ret <= 0) {
1669 					if (ret != -EAGAIN ||
1670 					    (1 << ssk->sk_state) &
1671 					     (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE))
1672 						push_count--;
1673 					continue;
1674 				}
1675 				copied = true;
1676 			}
1677 		}
1678 	}
1679 
1680 	/* at this point we held the socket lock for the last subflow we used */
1681 	if (ssk)
1682 		mptcp_push_release(ssk, &info);
1683 
1684 	/* Avoid scheduling the rtx timer if no data has been pushed; the timer
1685 	 * will be updated on positive acks by __mptcp_cleanup_una().
1686 	 */
1687 	if (copied) {
1688 		if (!mptcp_rtx_timer_pending(sk))
1689 			mptcp_reset_rtx_timer(sk);
1690 		mptcp_check_send_data_fin(sk);
1691 	}
1692 }
1693 
1694 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
1695 {
1696 	struct mptcp_sock *msk = mptcp_sk(sk);
1697 	struct mptcp_sendmsg_info info = {
1698 		.data_lock_held = true,
1699 	};
1700 	bool keep_pushing = true;
1701 	struct sock *xmit_ssk;
1702 	int copied = 0;
1703 
1704 	info.flags = 0;
1705 	while (mptcp_send_head(sk) && keep_pushing) {
1706 		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1707 		int ret = 0;
1708 
1709 		/* check for a different subflow usage only after
1710 		 * spooling the first chunk of data
1711 		 */
1712 		if (first) {
1713 			mptcp_subflow_set_scheduled(subflow, false);
1714 			ret = __subflow_push_pending(sk, ssk, &info);
1715 			first = false;
1716 			if (ret <= 0)
1717 				break;
1718 			copied += ret;
1719 			continue;
1720 		}
1721 
1722 		if (mptcp_sched_get_send(msk))
1723 			goto out;
1724 
1725 		if (READ_ONCE(subflow->scheduled)) {
1726 			mptcp_subflow_set_scheduled(subflow, false);
1727 			ret = __subflow_push_pending(sk, ssk, &info);
1728 			if (ret <= 0)
1729 				keep_pushing = false;
1730 			copied += ret;
1731 		}
1732 
1733 		mptcp_for_each_subflow(msk, subflow) {
1734 			if (READ_ONCE(subflow->scheduled)) {
1735 				xmit_ssk = mptcp_subflow_tcp_sock(subflow);
1736 				if (xmit_ssk != ssk) {
1737 					mptcp_subflow_delegate(subflow,
1738 							       MPTCP_DELEGATE_SEND);
1739 					keep_pushing = false;
1740 				}
1741 			}
1742 		}
1743 	}
1744 
1745 out:
1746 	/* __mptcp_alloc_tx_skb could have released some wmem and we are
1747 	 * not going to flush it via release_sock()
1748 	 */
1749 	if (copied) {
1750 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1751 			 info.size_goal);
1752 		if (!mptcp_rtx_timer_pending(sk))
1753 			mptcp_reset_rtx_timer(sk);
1754 
1755 		if (msk->snd_data_fin_enable &&
1756 		    msk->snd_nxt + 1 == msk->write_seq)
1757 			mptcp_schedule_work(sk);
1758 	}
1759 }
1760 
1761 static int mptcp_disconnect(struct sock *sk, int flags);
1762 
1763 static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1764 				  size_t len, int *copied_syn)
1765 {
1766 	unsigned int saved_flags = msg->msg_flags;
1767 	struct mptcp_sock *msk = mptcp_sk(sk);
1768 	struct sock *ssk;
1769 	int ret;
1770 
1771 	/* on flags based fastopen the mptcp is supposed to create the
1772 	 * first subflow right now. Otherwise we are in the defer_connect
1773 	 * path, and the first subflow must be already present.
1774 	 * Since the defer_connect flag is cleared after the first succsful
1775 	 * fastopen attempt, no need to check for additional subflow status.
1776 	 */
1777 	if (msg->msg_flags & MSG_FASTOPEN) {
1778 		ssk = __mptcp_nmpc_sk(msk);
1779 		if (IS_ERR(ssk))
1780 			return PTR_ERR(ssk);
1781 	}
1782 	if (!msk->first)
1783 		return -EINVAL;
1784 
1785 	ssk = msk->first;
1786 
1787 	lock_sock(ssk);
1788 	msg->msg_flags |= MSG_DONTWAIT;
1789 	msk->fastopening = 1;
1790 	ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
1791 	msk->fastopening = 0;
1792 	msg->msg_flags = saved_flags;
1793 	release_sock(ssk);
1794 
1795 	/* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1796 	if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
1797 		ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1798 					    msg->msg_namelen, msg->msg_flags, 1);
1799 
1800 		/* Keep the same behaviour of plain TCP: zero the copied bytes in
1801 		 * case of any error, except timeout or signal
1802 		 */
1803 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
1804 			*copied_syn = 0;
1805 	} else if (ret && ret != -EINPROGRESS) {
1806 		/* The disconnect() op called by tcp_sendmsg_fastopen()/
1807 		 * __inet_stream_connect() can fail, due to looking check,
1808 		 * see mptcp_disconnect().
1809 		 * Attempt it again outside the problematic scope.
1810 		 */
1811 		if (!mptcp_disconnect(sk, 0)) {
1812 			sk->sk_disconnects++;
1813 			sk->sk_socket->state = SS_UNCONNECTED;
1814 		}
1815 	}
1816 	inet_clear_bit(DEFER_CONNECT, sk);
1817 
1818 	return ret;
1819 }
1820 
1821 static int do_copy_data_nocache(struct sock *sk, int copy,
1822 				struct iov_iter *from, char *to)
1823 {
1824 	if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1825 		if (!copy_from_iter_full_nocache(to, copy, from))
1826 			return -EFAULT;
1827 	} else if (!copy_from_iter_full(to, copy, from)) {
1828 		return -EFAULT;
1829 	}
1830 	return 0;
1831 }
1832 
1833 /* open-code sk_stream_memory_free() plus sent limit computation to
1834  * avoid indirect calls in fast-path.
1835  * Called under the msk socket lock, so we can avoid a bunch of ONCE
1836  * annotations.
1837  */
1838 static u32 mptcp_send_limit(const struct sock *sk)
1839 {
1840 	const struct mptcp_sock *msk = mptcp_sk(sk);
1841 	u32 limit, not_sent;
1842 
1843 	if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
1844 		return 0;
1845 
1846 	limit = mptcp_notsent_lowat(sk);
1847 	if (limit == UINT_MAX)
1848 		return UINT_MAX;
1849 
1850 	not_sent = msk->write_seq - msk->snd_nxt;
1851 	if (not_sent >= limit)
1852 		return 0;
1853 
1854 	return limit - not_sent;
1855 }
1856 
1857 static void mptcp_rps_record_subflows(const struct mptcp_sock *msk)
1858 {
1859 	struct mptcp_subflow_context *subflow;
1860 
1861 	if (!rfs_is_needed())
1862 		return;
1863 
1864 	mptcp_for_each_subflow(msk, subflow) {
1865 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1866 
1867 		sock_rps_record_flow(ssk);
1868 	}
1869 }
1870 
1871 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1872 {
1873 	struct mptcp_sock *msk = mptcp_sk(sk);
1874 	struct page_frag *pfrag;
1875 	size_t copied = 0;
1876 	int ret = 0;
1877 	long timeo;
1878 
1879 	/* silently ignore everything else */
1880 	msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1881 			  MSG_FASTOPEN | MSG_EOR;
1882 
1883 	lock_sock(sk);
1884 
1885 	mptcp_rps_record_subflows(msk);
1886 
1887 	if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
1888 		     msg->msg_flags & MSG_FASTOPEN)) {
1889 		int copied_syn = 0;
1890 
1891 		ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn);
1892 		copied += copied_syn;
1893 		if (ret == -EINPROGRESS && copied_syn > 0)
1894 			goto out;
1895 		else if (ret)
1896 			goto do_error;
1897 	}
1898 
1899 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1900 
1901 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1902 		ret = sk_stream_wait_connect(sk, &timeo);
1903 		if (ret)
1904 			goto do_error;
1905 	}
1906 
1907 	ret = -EPIPE;
1908 	if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)))
1909 		goto do_error;
1910 
1911 	pfrag = sk_page_frag(sk);
1912 
1913 	while (msg_data_left(msg)) {
1914 		int total_ts, frag_truesize = 0;
1915 		struct mptcp_data_frag *dfrag;
1916 		bool dfrag_collapsed;
1917 		size_t psize, offset;
1918 		u32 copy_limit;
1919 
1920 		/* ensure fitting the notsent_lowat() constraint */
1921 		copy_limit = mptcp_send_limit(sk);
1922 		if (!copy_limit)
1923 			goto wait_for_memory;
1924 
1925 		/* reuse tail pfrag, if possible, or carve a new one from the
1926 		 * page allocator
1927 		 */
1928 		dfrag = mptcp_pending_tail(sk);
1929 		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1930 		if (!dfrag_collapsed) {
1931 			if (!mptcp_page_frag_refill(sk, pfrag))
1932 				goto wait_for_memory;
1933 
1934 			dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1935 			frag_truesize = dfrag->overhead;
1936 		}
1937 
1938 		/* we do not bound vs wspace, to allow a single packet.
1939 		 * memory accounting will prevent execessive memory usage
1940 		 * anyway
1941 		 */
1942 		offset = dfrag->offset + dfrag->data_len;
1943 		psize = pfrag->size - offset;
1944 		psize = min_t(size_t, psize, msg_data_left(msg));
1945 		psize = min_t(size_t, psize, copy_limit);
1946 		total_ts = psize + frag_truesize;
1947 
1948 		if (!sk_wmem_schedule(sk, total_ts))
1949 			goto wait_for_memory;
1950 
1951 		ret = do_copy_data_nocache(sk, psize, &msg->msg_iter,
1952 					   page_address(dfrag->page) + offset);
1953 		if (ret)
1954 			goto do_error;
1955 
1956 		/* data successfully copied into the write queue */
1957 		sk_forward_alloc_add(sk, -total_ts);
1958 		copied += psize;
1959 		dfrag->data_len += psize;
1960 		frag_truesize += psize;
1961 		pfrag->offset += frag_truesize;
1962 		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1963 
1964 		/* charge data on mptcp pending queue to the msk socket
1965 		 * Note: we charge such data both to sk and ssk
1966 		 */
1967 		sk_wmem_queued_add(sk, frag_truesize);
1968 		if (!dfrag_collapsed) {
1969 			get_page(dfrag->page);
1970 			list_add_tail(&dfrag->list, &msk->rtx_queue);
1971 			if (!msk->first_pending)
1972 				msk->first_pending = dfrag;
1973 		}
1974 		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
1975 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
1976 			 !dfrag_collapsed);
1977 
1978 		continue;
1979 
1980 wait_for_memory:
1981 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1982 		__mptcp_push_pending(sk, msg->msg_flags);
1983 		ret = sk_stream_wait_memory(sk, &timeo);
1984 		if (ret)
1985 			goto do_error;
1986 	}
1987 
1988 	if (copied) {
1989 		/* mark the last dfrag with EOR if MSG_EOR was set */
1990 		if (msg->msg_flags & MSG_EOR) {
1991 			struct mptcp_data_frag *dfrag = mptcp_pending_tail(sk);
1992 
1993 			if (dfrag)
1994 				dfrag->eor = 1;
1995 		}
1996 		__mptcp_push_pending(sk, msg->msg_flags);
1997 	}
1998 
1999 out:
2000 	release_sock(sk);
2001 	return copied;
2002 
2003 do_error:
2004 	if (copied)
2005 		goto out;
2006 
2007 	copied = sk_stream_error(sk, msg->msg_flags, ret);
2008 	goto out;
2009 }
2010 
2011 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
2012 
2013 static void mptcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
2014 {
2015 	/* avoid the indirect call, we know the destructor is sock_rfree */
2016 	skb->destructor = NULL;
2017 	skb->sk = NULL;
2018 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2019 	sk_mem_uncharge(sk, skb->truesize);
2020 	__skb_unlink(skb, &sk->sk_receive_queue);
2021 	skb_attempt_defer_free(skb);
2022 }
2023 
2024 static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
2025 				size_t len, int flags, int copied_total,
2026 				struct scm_timestamping_internal *tss,
2027 				int *cmsg_flags, struct sk_buff **last)
2028 {
2029 	struct mptcp_sock *msk = mptcp_sk(sk);
2030 	struct sk_buff *skb, *tmp;
2031 	int total_data_len = 0;
2032 	int copied = 0;
2033 
2034 	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
2035 		u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
2036 		u32 data_len = skb->len - offset;
2037 		u32 count;
2038 		int err;
2039 
2040 		if (flags & MSG_PEEK) {
2041 			/* skip already peeked skbs */
2042 			if (total_data_len + data_len <= copied_total) {
2043 				total_data_len += data_len;
2044 				*last = skb;
2045 				continue;
2046 			}
2047 
2048 			/* skip the already peeked data in the current skb */
2049 			delta = copied_total - total_data_len;
2050 			offset += delta;
2051 			data_len -= delta;
2052 		}
2053 
2054 		count = min_t(size_t, len - copied, data_len);
2055 		if (!(flags & MSG_TRUNC)) {
2056 			err = skb_copy_datagram_msg(skb, offset, msg, count);
2057 			if (unlikely(err < 0)) {
2058 				if (!copied)
2059 					return err;
2060 				break;
2061 			}
2062 		}
2063 
2064 		if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
2065 			tcp_update_recv_tstamps(skb, tss);
2066 			*cmsg_flags |= MPTCP_CMSG_TS;
2067 		}
2068 
2069 		copied += count;
2070 
2071 		if (!(flags & MSG_PEEK)) {
2072 			msk->bytes_consumed += count;
2073 			if (count < data_len) {
2074 				MPTCP_SKB_CB(skb)->offset += count;
2075 				MPTCP_SKB_CB(skb)->map_seq += count;
2076 				break;
2077 			}
2078 
2079 			mptcp_eat_recv_skb(sk, skb);
2080 		} else {
2081 			*last = skb;
2082 		}
2083 
2084 		if (copied >= len)
2085 			break;
2086 	}
2087 
2088 	mptcp_rcv_space_adjust(msk, copied);
2089 	return copied;
2090 }
2091 
2092 static void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2093 {
2094 	const struct tcp_sock *tp = tcp_sk(ssk);
2095 
2096 	msk->rcvspace_init = 1;
2097 	msk->rcvq_space.copied = 0;
2098 	msk->rcvq_space.rtt_us = 0;
2099 
2100 	/* initial rcv_space offering made to peer */
2101 	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
2102 				      TCP_INIT_CWND * tp->advmss);
2103 	if (msk->rcvq_space.space == 0)
2104 		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
2105 }
2106 
2107 /* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
2108  *
2109  * Only difference: Use highest rtt estimate of the subflows in use.
2110  */
2111 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
2112 {
2113 	struct mptcp_subflow_context *subflow;
2114 	struct sock *sk = (struct sock *)msk;
2115 	u8 scaling_ratio = U8_MAX;
2116 	u32 time, advmss = 1;
2117 	u64 rtt_us, mstamp;
2118 
2119 	msk_owned_by_me(msk);
2120 
2121 	if (copied <= 0)
2122 		return;
2123 
2124 	if (!msk->rcvspace_init)
2125 		mptcp_rcv_space_init(msk, msk->first);
2126 
2127 	msk->rcvq_space.copied += copied;
2128 
2129 	mstamp = mptcp_stamp();
2130 	time = tcp_stamp_us_delta(mstamp, READ_ONCE(msk->rcvq_space.time));
2131 
2132 	rtt_us = msk->rcvq_space.rtt_us;
2133 	if (rtt_us && time < (rtt_us >> 3))
2134 		return;
2135 
2136 	rtt_us = 0;
2137 	mptcp_for_each_subflow(msk, subflow) {
2138 		const struct tcp_sock *tp;
2139 		u64 sf_rtt_us;
2140 		u32 sf_advmss;
2141 
2142 		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
2143 
2144 		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
2145 		sf_advmss = READ_ONCE(tp->advmss);
2146 
2147 		rtt_us = max(sf_rtt_us, rtt_us);
2148 		advmss = max(sf_advmss, advmss);
2149 		scaling_ratio = min(tp->scaling_ratio, scaling_ratio);
2150 	}
2151 
2152 	msk->rcvq_space.rtt_us = rtt_us;
2153 	msk->scaling_ratio = scaling_ratio;
2154 	if (time < (rtt_us >> 3) || rtt_us == 0)
2155 		return;
2156 
2157 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
2158 		goto new_measure;
2159 
2160 	trace_mptcp_rcvbuf_grow(sk, time);
2161 	if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
2162 		/* Make subflows follow along.  If we do not do this, we
2163 		 * get drops at subflow level if skbs can't be moved to
2164 		 * the mptcp rx queue fast enough (announced rcv_win can
2165 		 * exceed ssk->sk_rcvbuf).
2166 		 */
2167 		mptcp_for_each_subflow(msk, subflow) {
2168 			struct sock *ssk;
2169 			bool slow;
2170 
2171 			ssk = mptcp_subflow_tcp_sock(subflow);
2172 			slow = lock_sock_fast(ssk);
2173 			/* subflows can be added before tcp_init_transfer() */
2174 			if (tcp_sk(ssk)->rcvq_space.space)
2175 				tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
2176 			unlock_sock_fast(ssk, slow);
2177 		}
2178 	}
2179 
2180 new_measure:
2181 	msk->rcvq_space.copied = 0;
2182 	msk->rcvq_space.time = mstamp;
2183 }
2184 
2185 static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
2186 {
2187 	struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
2188 	struct mptcp_sock *msk = mptcp_sk(sk);
2189 	bool moved = false;
2190 
2191 	*delta = 0;
2192 	while (1) {
2193 		/* If the msk recvbuf is full stop, don't drop */
2194 		if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2195 			break;
2196 
2197 		prefetch(skb->next);
2198 		list_del(&skb->list);
2199 		*delta += skb->truesize;
2200 
2201 		moved |= __mptcp_move_skb(sk, skb);
2202 		if (list_empty(skbs))
2203 			break;
2204 
2205 		skb = list_first_entry(skbs, struct sk_buff, list);
2206 	}
2207 
2208 	__mptcp_ofo_queue(msk);
2209 	if (moved)
2210 		mptcp_check_data_fin((struct sock *)msk);
2211 	return moved;
2212 }
2213 
2214 static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
2215 {
2216 	struct mptcp_sock *msk = mptcp_sk(sk);
2217 
2218 	/* After CG initialization, subflows should never add skb before
2219 	 * gaining the CG themself.
2220 	 */
2221 	DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
2222 			       mem_cgroup_from_sk(sk));
2223 
2224 	/* Don't spool the backlog if the rcvbuf is full. */
2225 	if (list_empty(&msk->backlog_list) ||
2226 	    sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2227 		return false;
2228 
2229 	INIT_LIST_HEAD(skbs);
2230 	list_splice_init(&msk->backlog_list, skbs);
2231 	return true;
2232 }
2233 
2234 static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
2235 				  struct list_head *skbs)
2236 {
2237 	struct mptcp_sock *msk = mptcp_sk(sk);
2238 
2239 	WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
2240 	list_splice(skbs, &msk->backlog_list);
2241 }
2242 
2243 static bool mptcp_move_skbs(struct sock *sk)
2244 {
2245 	struct list_head skbs;
2246 	bool enqueued = false;
2247 	u32 moved;
2248 
2249 	mptcp_data_lock(sk);
2250 	while (mptcp_can_spool_backlog(sk, &skbs)) {
2251 		mptcp_data_unlock(sk);
2252 		enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
2253 
2254 		mptcp_data_lock(sk);
2255 		mptcp_backlog_spooled(sk, moved, &skbs);
2256 	}
2257 	mptcp_data_unlock(sk);
2258 	return enqueued;
2259 }
2260 
2261 static unsigned int mptcp_inq_hint(const struct sock *sk)
2262 {
2263 	const struct mptcp_sock *msk = mptcp_sk(sk);
2264 	const struct sk_buff *skb;
2265 
2266 	skb = skb_peek(&sk->sk_receive_queue);
2267 	if (skb) {
2268 		u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
2269 
2270 		if (hint_val >= INT_MAX)
2271 			return INT_MAX;
2272 
2273 		return (unsigned int)hint_val;
2274 	}
2275 
2276 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
2277 		return 1;
2278 
2279 	return 0;
2280 }
2281 
2282 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2283 			 int flags)
2284 {
2285 	struct mptcp_sock *msk = mptcp_sk(sk);
2286 	struct scm_timestamping_internal tss;
2287 	int copied = 0, cmsg_flags = 0;
2288 	int target;
2289 	long timeo;
2290 
2291 	/* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2292 	if (unlikely(flags & MSG_ERRQUEUE))
2293 		return inet_recv_error(sk, msg, len);
2294 
2295 	lock_sock(sk);
2296 	if (unlikely(sk->sk_state == TCP_LISTEN)) {
2297 		copied = -ENOTCONN;
2298 		goto out_err;
2299 	}
2300 
2301 	mptcp_rps_record_subflows(msk);
2302 
2303 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2304 
2305 	len = min_t(size_t, len, INT_MAX);
2306 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2307 
2308 	if (unlikely(msk->recvmsg_inq))
2309 		cmsg_flags = MPTCP_CMSG_INQ;
2310 
2311 	while (copied < len) {
2312 		struct sk_buff *last = NULL;
2313 		int err, bytes_read;
2314 
2315 		bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
2316 						  copied, &tss, &cmsg_flags,
2317 						  &last);
2318 		if (unlikely(bytes_read < 0)) {
2319 			if (!copied)
2320 				copied = bytes_read;
2321 			goto out_err;
2322 		}
2323 
2324 		copied += bytes_read;
2325 
2326 		if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
2327 			continue;
2328 
2329 		/* only the MPTCP socket status is relevant here. The exit
2330 		 * conditions mirror closely tcp_recvmsg()
2331 		 */
2332 		if (copied >= target)
2333 			break;
2334 
2335 		if (copied) {
2336 			if (tcp_recv_should_stop(sk) ||
2337 			    !timeo)
2338 				break;
2339 		} else {
2340 			if (sk->sk_err) {
2341 				copied = sock_error(sk);
2342 				break;
2343 			}
2344 
2345 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2346 				break;
2347 
2348 			if (sk->sk_state == TCP_CLOSE) {
2349 				copied = -ENOTCONN;
2350 				break;
2351 			}
2352 
2353 			if (!timeo) {
2354 				copied = -EAGAIN;
2355 				break;
2356 			}
2357 
2358 			if (signal_pending(current)) {
2359 				copied = sock_intr_errno(timeo);
2360 				break;
2361 			}
2362 		}
2363 
2364 		pr_debug("block timeout %ld\n", timeo);
2365 		mptcp_cleanup_rbuf(msk, copied);
2366 		err = sk_wait_data(sk, &timeo, last);
2367 		if (err < 0) {
2368 			err = copied ? : err;
2369 			goto out_err;
2370 		}
2371 	}
2372 
2373 	mptcp_cleanup_rbuf(msk, copied);
2374 
2375 out_err:
2376 	if (cmsg_flags && copied >= 0) {
2377 		if (cmsg_flags & MPTCP_CMSG_TS)
2378 			tcp_recv_timestamp(msg, sk, &tss);
2379 
2380 		if (cmsg_flags & MPTCP_CMSG_INQ) {
2381 			unsigned int inq = mptcp_inq_hint(sk);
2382 
2383 			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2384 		}
2385 	}
2386 
2387 	pr_debug("msk=%p rx queue empty=%d copied=%d\n",
2388 		 msk, skb_queue_empty(&sk->sk_receive_queue), copied);
2389 
2390 	release_sock(sk);
2391 	return copied;
2392 }
2393 
2394 static void mptcp_retransmit_timer(struct timer_list *t)
2395 {
2396 	struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer);
2397 	struct mptcp_sock *msk = mptcp_sk(sk);
2398 
2399 	bh_lock_sock(sk);
2400 	if (!sock_owned_by_user(sk)) {
2401 		/* we need a process context to retransmit */
2402 		if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2403 			mptcp_schedule_work(sk);
2404 	} else {
2405 		/* delegate our work to tcp_release_cb() */
2406 		__set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
2407 	}
2408 	bh_unlock_sock(sk);
2409 	sock_put(sk);
2410 }
2411 
2412 static void mptcp_tout_timer(struct timer_list *t)
2413 {
2414 	struct inet_connection_sock *icsk =
2415 		timer_container_of(icsk, t, mptcp_tout_timer);
2416 	struct sock *sk = &icsk->icsk_inet.sk;
2417 
2418 	mptcp_schedule_work(sk);
2419 	sock_put(sk);
2420 }
2421 
2422 /* Find an idle subflow.  Return NULL if there is unacked data at tcp
2423  * level.
2424  *
2425  * A backup subflow is returned only if that is the only kind available.
2426  */
2427 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
2428 {
2429 	struct sock *backup = NULL, *pick = NULL;
2430 	struct mptcp_subflow_context *subflow;
2431 	int min_stale_count = INT_MAX;
2432 
2433 	mptcp_for_each_subflow(msk, subflow) {
2434 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2435 
2436 		if (!__mptcp_subflow_active(subflow))
2437 			continue;
2438 
2439 		/* still data outstanding at TCP level? skip this */
2440 		if (!tcp_rtx_and_write_queues_empty(ssk)) {
2441 			mptcp_pm_subflow_chk_stale(msk, ssk);
2442 			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2443 			continue;
2444 		}
2445 
2446 		if (subflow->backup || subflow->request_bkup) {
2447 			if (!backup)
2448 				backup = ssk;
2449 			continue;
2450 		}
2451 
2452 		if (!pick)
2453 			pick = ssk;
2454 	}
2455 
2456 	if (pick)
2457 		return pick;
2458 
2459 	/* use backup only if there are no progresses anywhere */
2460 	return min_stale_count > 1 ? backup : NULL;
2461 }
2462 
2463 bool __mptcp_retransmit_pending_data(struct sock *sk)
2464 {
2465 	struct mptcp_data_frag *cur, *rtx_head;
2466 	struct mptcp_sock *msk = mptcp_sk(sk);
2467 
2468 	if (__mptcp_check_fallback(msk))
2469 		return false;
2470 
2471 	/* the closing socket has some data untransmitted and/or unacked:
2472 	 * some data in the mptcp rtx queue has not really xmitted yet.
2473 	 * keep it simple and re-inject the whole mptcp level rtx queue
2474 	 */
2475 	mptcp_data_lock(sk);
2476 	__mptcp_clean_una_wakeup(sk);
2477 	rtx_head = mptcp_rtx_head(sk);
2478 	if (!rtx_head) {
2479 		mptcp_data_unlock(sk);
2480 		return false;
2481 	}
2482 
2483 	msk->recovery_snd_nxt = msk->snd_nxt;
2484 	msk->recovery = true;
2485 	mptcp_data_unlock(sk);
2486 
2487 	msk->first_pending = rtx_head;
2488 	msk->snd_burst = 0;
2489 
2490 	/* be sure to clear the "sent status" on all re-injected fragments */
2491 	list_for_each_entry(cur, &msk->rtx_queue, list) {
2492 		if (!cur->already_sent)
2493 			break;
2494 		cur->already_sent = 0;
2495 	}
2496 
2497 	return true;
2498 }
2499 
2500 /* flags for __mptcp_close_ssk() */
2501 #define MPTCP_CF_PUSH		BIT(1)
2502 
2503 /* be sure to send a reset only if the caller asked for it, also
2504  * clean completely the subflow status when the subflow reaches
2505  * TCP_CLOSE state
2506  */
2507 static void __mptcp_subflow_disconnect(struct sock *ssk,
2508 				       struct mptcp_subflow_context *subflow,
2509 				       bool fastclosing)
2510 {
2511 	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
2512 	    fastclosing) {
2513 		/* The MPTCP code never wait on the subflow sockets, TCP-level
2514 		 * disconnect should never fail
2515 		 */
2516 		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
2517 		mptcp_subflow_ctx_reset(subflow);
2518 	} else {
2519 		tcp_shutdown(ssk, SEND_SHUTDOWN);
2520 	}
2521 }
2522 
2523 /* subflow sockets can be either outgoing (connect) or incoming
2524  * (accept).
2525  *
2526  * Outgoing subflows use in-kernel sockets.
2527  * Incoming subflows do not have their own 'struct socket' allocated,
2528  * so we need to use tcp_close() after detaching them from the mptcp
2529  * parent socket.
2530  */
2531 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2532 			      struct mptcp_subflow_context *subflow,
2533 			      unsigned int flags)
2534 {
2535 	struct mptcp_sock *msk = mptcp_sk(sk);
2536 	bool dispose_it, need_push = false;
2537 	int fwd_remaining;
2538 
2539 	/* Do not pass RX data to the msk, even if the subflow socket is not
2540 	 * going to be freed (i.e. even for the first subflow on graceful
2541 	 * subflow close.
2542 	 */
2543 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2544 	subflow->closing = 1;
2545 
2546 	/* Borrow the fwd allocated page left-over; fwd memory for the subflow
2547 	 * could be negative at this point, but will be reach zero soon - when
2548 	 * the data allocated using such fragment will be freed.
2549 	 */
2550 	if (subflow->lent_mem_frag) {
2551 		fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
2552 		sk_forward_alloc_add(sk, fwd_remaining);
2553 		sk_forward_alloc_add(ssk, -fwd_remaining);
2554 		subflow->lent_mem_frag = 0;
2555 	}
2556 
2557 	/* If the first subflow moved to a close state before accept, e.g. due
2558 	 * to an incoming reset or listener shutdown, the subflow socket is
2559 	 * already deleted by inet_child_forget() and the mptcp socket can't
2560 	 * survive too.
2561 	 */
2562 	if (msk->in_accept_queue && msk->first == ssk &&
2563 	    (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
2564 		/* ensure later check in mptcp_worker() will dispose the msk */
2565 		sock_set_flag(sk, SOCK_DEAD);
2566 		mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
2567 		mptcp_subflow_drop_ctx(ssk);
2568 		goto out_release;
2569 	}
2570 
2571 	dispose_it = msk->free_first || ssk != msk->first;
2572 	if (dispose_it)
2573 		list_del(&subflow->node);
2574 
2575 	if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
2576 		tcp_set_state(ssk, TCP_CLOSE);
2577 
2578 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
2579 	if (!dispose_it) {
2580 		__mptcp_subflow_disconnect(ssk, subflow, msk->fastclosing);
2581 		release_sock(ssk);
2582 
2583 		goto out;
2584 	}
2585 
2586 	subflow->disposable = 1;
2587 
2588 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
2589 	 * the ssk has been already destroyed, we just need to release the
2590 	 * reference owned by msk;
2591 	 */
2592 	if (!inet_csk(ssk)->icsk_ulp_ops) {
2593 		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
2594 		kfree_rcu(subflow, rcu);
2595 	} else {
2596 		/* otherwise tcp will dispose of the ssk and subflow ctx */
2597 		__tcp_close(ssk, 0);
2598 
2599 		/* close acquired an extra ref */
2600 		__sock_put(ssk);
2601 	}
2602 
2603 out_release:
2604 	__mptcp_subflow_error_report(sk, ssk);
2605 	release_sock(ssk);
2606 
2607 	sock_put(ssk);
2608 
2609 	if (ssk == msk->first)
2610 		WRITE_ONCE(msk->first, NULL);
2611 
2612 out:
2613 	__mptcp_sync_sndbuf(sk);
2614 	if (need_push)
2615 		__mptcp_push_pending(sk, 0);
2616 
2617 	/* Catch every 'all subflows closed' scenario, including peers silently
2618 	 * closing them, e.g. due to timeout.
2619 	 * For established sockets, allow an additional timeout before closing,
2620 	 * as the protocol can still create more subflows.
2621 	 */
2622 	if (list_is_singular(&msk->conn_list) && msk->first &&
2623 	    inet_sk_state_load(msk->first) == TCP_CLOSE) {
2624 		if (sk->sk_state != TCP_ESTABLISHED ||
2625 		    msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
2626 			mptcp_set_state(sk, TCP_CLOSE);
2627 			mptcp_close_wake_up(sk);
2628 		} else {
2629 			mptcp_start_tout_timer(sk);
2630 		}
2631 	}
2632 }
2633 
2634 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2635 		     struct mptcp_subflow_context *subflow)
2636 {
2637 	struct mptcp_sock *msk = mptcp_sk(sk);
2638 	struct sk_buff *skb;
2639 
2640 	/* The first subflow can already be closed or disconnected */
2641 	if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0)
2642 		return;
2643 
2644 	subflow->close_event_done = true;
2645 
2646 	if (sk->sk_state == TCP_ESTABLISHED)
2647 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2648 
2649 	/* Remove any reference from the backlog to this ssk; backlog skbs consume
2650 	 * space in the msk receive queue, no need to touch sk->sk_rmem_alloc
2651 	 */
2652 	list_for_each_entry(skb, &msk->backlog_list, list) {
2653 		if (skb->sk != ssk)
2654 			continue;
2655 
2656 		atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
2657 		skb->sk = NULL;
2658 	}
2659 
2660 	/* subflow aborted before reaching the fully_established status
2661 	 * attempt the creation of the next subflow
2662 	 */
2663 	mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);
2664 
2665 	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2666 }
2667 
2668 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2669 {
2670 	return 0;
2671 }
2672 
2673 static void __mptcp_close_subflow(struct sock *sk)
2674 {
2675 	struct mptcp_subflow_context *subflow, *tmp;
2676 	struct mptcp_sock *msk = mptcp_sk(sk);
2677 
2678 	might_sleep();
2679 
2680 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2681 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2682 		int ssk_state = inet_sk_state_load(ssk);
2683 
2684 		if (ssk_state != TCP_CLOSE &&
2685 		    (ssk_state != TCP_CLOSE_WAIT ||
2686 		     inet_sk_state_load(sk) != TCP_ESTABLISHED ||
2687 		     __mptcp_check_fallback(msk)))
2688 			continue;
2689 
2690 		/* 'subflow_data_ready' will re-sched once rx queue is empty */
2691 		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2692 			continue;
2693 
2694 		mptcp_close_ssk(sk, ssk, subflow);
2695 	}
2696 
2697 }
2698 
2699 static bool mptcp_close_tout_expired(const struct sock *sk)
2700 {
2701 	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
2702 	    sk->sk_state == TCP_CLOSE)
2703 		return false;
2704 
2705 	return time_after32(tcp_jiffies32,
2706 		  inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
2707 }
2708 
2709 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2710 {
2711 	struct mptcp_subflow_context *subflow, *tmp;
2712 	struct sock *sk = (struct sock *)msk;
2713 
2714 	if (likely(!READ_ONCE(msk->rcv_fastclose)))
2715 		return;
2716 
2717 	mptcp_token_destroy(msk);
2718 
2719 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2720 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2721 		bool slow;
2722 
2723 		slow = lock_sock_fast(tcp_sk);
2724 		if (tcp_sk->sk_state != TCP_CLOSE) {
2725 			mptcp_send_active_reset_reason(tcp_sk);
2726 			tcp_set_state(tcp_sk, TCP_CLOSE);
2727 		}
2728 		unlock_sock_fast(tcp_sk, slow);
2729 	}
2730 
2731 	/* Mirror the tcp_reset() error propagation */
2732 	switch (sk->sk_state) {
2733 	case TCP_SYN_SENT:
2734 		WRITE_ONCE(sk->sk_err, ECONNREFUSED);
2735 		break;
2736 	case TCP_CLOSE_WAIT:
2737 		WRITE_ONCE(sk->sk_err, EPIPE);
2738 		break;
2739 	case TCP_CLOSE:
2740 		return;
2741 	default:
2742 		WRITE_ONCE(sk->sk_err, ECONNRESET);
2743 	}
2744 
2745 	mptcp_set_state(sk, TCP_CLOSE);
2746 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
2747 	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2748 	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2749 
2750 	/* the calling mptcp_worker will properly destroy the socket */
2751 	if (sock_flag(sk, SOCK_DEAD))
2752 		return;
2753 
2754 	sk->sk_state_change(sk);
2755 	sk_error_report(sk);
2756 }
2757 
2758 static void __mptcp_retrans(struct sock *sk)
2759 {
2760 	struct mptcp_sendmsg_info info = { .data_lock_held = true, };
2761 	struct mptcp_sock *msk = mptcp_sk(sk);
2762 	struct mptcp_subflow_context *subflow;
2763 	struct mptcp_data_frag *dfrag;
2764 	struct sock *ssk;
2765 	int ret, err;
2766 	u16 len = 0;
2767 
2768 	mptcp_clean_una_wakeup(sk);
2769 
2770 	/* first check ssk: need to kick "stale" logic */
2771 	err = mptcp_sched_get_retrans(msk);
2772 	dfrag = mptcp_rtx_head(sk);
2773 	if (!dfrag) {
2774 		if (mptcp_data_fin_enabled(msk)) {
2775 			struct inet_connection_sock *icsk = inet_csk(sk);
2776 
2777 			WRITE_ONCE(icsk->icsk_retransmits,
2778 				   icsk->icsk_retransmits + 1);
2779 			mptcp_set_datafin_timeout(sk);
2780 			mptcp_send_ack(msk);
2781 
2782 			goto reset_timer;
2783 		}
2784 
2785 		if (!mptcp_send_head(sk))
2786 			goto clear_scheduled;
2787 
2788 		goto reset_timer;
2789 	}
2790 
2791 	if (err)
2792 		goto reset_timer;
2793 
2794 	mptcp_for_each_subflow(msk, subflow) {
2795 		if (READ_ONCE(subflow->scheduled)) {
2796 			u16 copied = 0;
2797 
2798 			mptcp_subflow_set_scheduled(subflow, false);
2799 
2800 			ssk = mptcp_subflow_tcp_sock(subflow);
2801 
2802 			lock_sock(ssk);
2803 
2804 			/* limit retransmission to the bytes already sent on some subflows */
2805 			info.sent = 0;
2806 			info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
2807 								    dfrag->already_sent;
2808 
2809 			/*
2810 			 * make the whole retrans decision, xmit, disallow
2811 			 * fallback atomic, note that we can't retrans even
2812 			 * when an infinite fallback is in progress, i.e. new
2813 			 * subflows are disallowed.
2814 			 */
2815 			spin_lock_bh(&msk->fallback_lock);
2816 			if (__mptcp_check_fallback(msk) ||
2817 			    !msk->allow_subflows) {
2818 				spin_unlock_bh(&msk->fallback_lock);
2819 				release_sock(ssk);
2820 				goto clear_scheduled;
2821 			}
2822 
2823 			while (info.sent < info.limit) {
2824 				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2825 				if (ret <= 0)
2826 					break;
2827 
2828 				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2829 				copied += ret;
2830 				info.sent += ret;
2831 			}
2832 			if (copied) {
2833 				len = max(copied, len);
2834 				tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2835 					 info.size_goal);
2836 				msk->allow_infinite_fallback = false;
2837 			}
2838 			spin_unlock_bh(&msk->fallback_lock);
2839 
2840 			release_sock(ssk);
2841 		}
2842 	}
2843 
2844 	msk->bytes_retrans += len;
2845 	dfrag->already_sent = max(dfrag->already_sent, len);
2846 
2847 reset_timer:
2848 	mptcp_check_and_set_pending(sk);
2849 
2850 	if (!mptcp_rtx_timer_pending(sk))
2851 		mptcp_reset_rtx_timer(sk);
2852 
2853 clear_scheduled:
2854 	/* If no rtx data was available or in case of fallback, there
2855 	 * could be left-over scheduled subflows; clear them all
2856 	 * or later xmit could use bad ones
2857 	 */
2858 	mptcp_for_each_subflow(msk, subflow)
2859 		if (READ_ONCE(subflow->scheduled))
2860 			mptcp_subflow_set_scheduled(subflow, false);
2861 }
2862 
2863 /* schedule the timeout timer for the relevant event: either close timeout
2864  * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
2865  */
2866 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
2867 {
2868 	struct sock *sk = (struct sock *)msk;
2869 	unsigned long timeout, close_timeout;
2870 
2871 	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
2872 		return;
2873 
2874 	close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
2875 			tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);
2876 
2877 	/* the close timeout takes precedence on the fail one, and here at least one of
2878 	 * them is active
2879 	 */
2880 	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
2881 
2882 	sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout);
2883 }
2884 
2885 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
2886 {
2887 	struct sock *ssk = msk->first;
2888 	bool slow;
2889 
2890 	if (!ssk)
2891 		return;
2892 
2893 	pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
2894 
2895 	slow = lock_sock_fast(ssk);
2896 	mptcp_subflow_reset(ssk);
2897 	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
2898 	unlock_sock_fast(ssk, slow);
2899 }
2900 
2901 static void mptcp_backlog_purge(struct sock *sk)
2902 {
2903 	struct mptcp_sock *msk = mptcp_sk(sk);
2904 	struct sk_buff *tmp, *skb;
2905 	LIST_HEAD(backlog);
2906 
2907 	mptcp_data_lock(sk);
2908 	list_splice_init(&msk->backlog_list, &backlog);
2909 	msk->backlog_len = 0;
2910 	mptcp_data_unlock(sk);
2911 
2912 	list_for_each_entry_safe(skb, tmp, &backlog, list) {
2913 		mptcp_borrow_fwdmem(sk, skb);
2914 		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
2915 	}
2916 	sk_mem_reclaim(sk);
2917 }
2918 
2919 static void mptcp_do_fastclose(struct sock *sk)
2920 {
2921 	struct mptcp_subflow_context *subflow, *tmp;
2922 	struct mptcp_sock *msk = mptcp_sk(sk);
2923 
2924 	mptcp_set_state(sk, TCP_CLOSE);
2925 	mptcp_backlog_purge(sk);
2926 	msk->fastclosing = 1;
2927 
2928 	/* Explicitly send the fastclose reset as need */
2929 	if (__mptcp_check_fallback(msk))
2930 		return;
2931 
2932 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2933 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2934 
2935 		lock_sock(ssk);
2936 
2937 		/* Some subflow socket states don't allow/need a reset.*/
2938 		if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
2939 			goto unlock;
2940 
2941 		subflow->send_fastclose = 1;
2942 
2943 		/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2944 		 * issue in __tcp_select_window(), see tcp_disconnect().
2945 		 */
2946 		inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
2947 
2948 		tcp_send_active_reset(ssk, ssk->sk_allocation,
2949 				      SK_RST_REASON_TCP_ABORT_ON_CLOSE);
2950 unlock:
2951 		release_sock(ssk);
2952 	}
2953 }
2954 
2955 static void mptcp_worker(struct work_struct *work)
2956 {
2957 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2958 	struct sock *sk = (struct sock *)msk;
2959 	unsigned long fail_tout;
2960 	int state;
2961 
2962 	lock_sock(sk);
2963 	state = sk->sk_state;
2964 	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
2965 		goto unlock;
2966 
2967 	mptcp_check_fastclose(msk);
2968 
2969 	mptcp_pm_worker(msk);
2970 
2971 	mptcp_check_send_data_fin(sk);
2972 	mptcp_check_data_fin_ack(sk);
2973 	mptcp_check_data_fin(sk);
2974 
2975 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2976 		__mptcp_close_subflow(sk);
2977 
2978 	if (mptcp_close_tout_expired(sk)) {
2979 		struct mptcp_subflow_context *subflow, *tmp;
2980 
2981 		mptcp_do_fastclose(sk);
2982 		mptcp_for_each_subflow_safe(msk, subflow, tmp)
2983 			__mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
2984 		mptcp_close_wake_up(sk);
2985 	}
2986 
2987 	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
2988 		__mptcp_destroy_sock(sk);
2989 		goto unlock;
2990 	}
2991 
2992 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2993 		__mptcp_retrans(sk);
2994 
2995 	fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
2996 	if (fail_tout && time_after(jiffies, fail_tout))
2997 		mptcp_mp_fail_no_response(msk);
2998 
2999 unlock:
3000 	release_sock(sk);
3001 	sock_put(sk);
3002 }
3003 
3004 static void __mptcp_init_sock(struct sock *sk)
3005 {
3006 	struct mptcp_sock *msk = mptcp_sk(sk);
3007 
3008 	INIT_LIST_HEAD(&msk->conn_list);
3009 	INIT_LIST_HEAD(&msk->join_list);
3010 	INIT_LIST_HEAD(&msk->rtx_queue);
3011 	INIT_LIST_HEAD(&msk->backlog_list);
3012 	INIT_WORK(&msk->work, mptcp_worker);
3013 	msk->out_of_order_queue = RB_ROOT;
3014 	msk->first_pending = NULL;
3015 	msk->timer_ival = TCP_RTO_MIN;
3016 	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
3017 	msk->backlog_len = 0;
3018 
3019 	WRITE_ONCE(msk->first, NULL);
3020 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
3021 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3022 	msk->allow_infinite_fallback = true;
3023 	msk->allow_subflows = true;
3024 	msk->recovery = false;
3025 	msk->subflow_id = 1;
3026 	msk->last_data_sent = tcp_jiffies32;
3027 	msk->last_data_recv = tcp_jiffies32;
3028 	msk->last_ack_recv = tcp_jiffies32;
3029 
3030 	mptcp_pm_data_init(msk);
3031 	spin_lock_init(&msk->fallback_lock);
3032 
3033 	/* re-use the csk retrans timer for MPTCP-level retrans */
3034 	timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0);
3035 	timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0);
3036 }
3037 
3038 static void mptcp_ca_reset(struct sock *sk)
3039 {
3040 	struct inet_connection_sock *icsk = inet_csk(sk);
3041 
3042 	tcp_assign_congestion_control(sk);
3043 	strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
3044 		sizeof(mptcp_sk(sk)->ca_name));
3045 
3046 	/* no need to keep a reference to the ops, the name will suffice */
3047 	tcp_cleanup_congestion_control(sk);
3048 	icsk->icsk_ca_ops = NULL;
3049 }
3050 
3051 static int mptcp_init_sock(struct sock *sk)
3052 {
3053 	struct net *net = sock_net(sk);
3054 	int ret;
3055 
3056 	__mptcp_init_sock(sk);
3057 
3058 	if (!mptcp_is_enabled(net))
3059 		return -ENOPROTOOPT;
3060 
3061 	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
3062 		return -ENOMEM;
3063 
3064 	rcu_read_lock();
3065 	ret = mptcp_init_sched(mptcp_sk(sk),
3066 			       mptcp_sched_find(mptcp_get_scheduler(net)));
3067 	rcu_read_unlock();
3068 	if (ret)
3069 		return ret;
3070 
3071 	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
3072 
3073 	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
3074 	 * propagate the correct value
3075 	 */
3076 	mptcp_ca_reset(sk);
3077 
3078 	sk_sockets_allocated_inc(sk);
3079 	sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
3080 	sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);
3081 	sk->sk_write_space = sk_stream_write_space;
3082 
3083 	return 0;
3084 }
3085 
3086 static void __mptcp_clear_xmit(struct sock *sk)
3087 {
3088 	struct mptcp_sock *msk = mptcp_sk(sk);
3089 	struct mptcp_data_frag *dtmp, *dfrag;
3090 
3091 	msk->first_pending = NULL;
3092 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
3093 		dfrag_clear(sk, dfrag);
3094 }
3095 
3096 void mptcp_cancel_work(struct sock *sk)
3097 {
3098 	struct mptcp_sock *msk = mptcp_sk(sk);
3099 
3100 	if (cancel_work_sync(&msk->work))
3101 		__sock_put(sk);
3102 }
3103 
3104 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
3105 {
3106 	lock_sock(ssk);
3107 
3108 	switch (ssk->sk_state) {
3109 	case TCP_LISTEN:
3110 		if (!(how & RCV_SHUTDOWN))
3111 			break;
3112 		fallthrough;
3113 	case TCP_SYN_SENT:
3114 		WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
3115 		break;
3116 	default:
3117 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
3118 			pr_debug("Fallback\n");
3119 			ssk->sk_shutdown |= how;
3120 			tcp_shutdown(ssk, how);
3121 
3122 			/* simulate the data_fin ack reception to let the state
3123 			 * machine move forward
3124 			 */
3125 			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
3126 			mptcp_schedule_work(sk);
3127 		} else {
3128 			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
3129 			tcp_send_ack(ssk);
3130 			if (!mptcp_rtx_timer_pending(sk))
3131 				mptcp_reset_rtx_timer(sk);
3132 		}
3133 		break;
3134 	}
3135 
3136 	release_sock(ssk);
3137 }
3138 
3139 void mptcp_set_state(struct sock *sk, int state)
3140 {
3141 	int oldstate = sk->sk_state;
3142 
3143 	switch (state) {
3144 	case TCP_ESTABLISHED:
3145 		if (oldstate != TCP_ESTABLISHED)
3146 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3147 		break;
3148 	case TCP_CLOSE_WAIT:
3149 		/* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
3150 		 * MPTCP "accepted" sockets will be created later on. So no
3151 		 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
3152 		 */
3153 		break;
3154 	default:
3155 		if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
3156 			MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3157 	}
3158 
3159 	inet_sk_state_store(sk, state);
3160 }
3161 
3162 static const unsigned char new_state[16] = {
3163 	/* current state:     new state:      action:	*/
3164 	[0 /* (Invalid) */] = TCP_CLOSE,
3165 	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3166 	[TCP_SYN_SENT]      = TCP_CLOSE,
3167 	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3168 	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
3169 	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
3170 	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
3171 	[TCP_CLOSE]         = TCP_CLOSE,
3172 	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
3173 	[TCP_LAST_ACK]      = TCP_LAST_ACK,
3174 	[TCP_LISTEN]        = TCP_CLOSE,
3175 	[TCP_CLOSING]       = TCP_CLOSING,
3176 	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
3177 };
3178 
3179 static int mptcp_close_state(struct sock *sk)
3180 {
3181 	int next = (int)new_state[sk->sk_state];
3182 	int ns = next & TCP_STATE_MASK;
3183 
3184 	mptcp_set_state(sk, ns);
3185 
3186 	return next & TCP_ACTION_FIN;
3187 }
3188 
3189 static void mptcp_check_send_data_fin(struct sock *sk)
3190 {
3191 	struct mptcp_subflow_context *subflow;
3192 	struct mptcp_sock *msk = mptcp_sk(sk);
3193 
3194 	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
3195 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
3196 		 msk->snd_nxt, msk->write_seq);
3197 
3198 	/* we still need to enqueue subflows or not really shutting down,
3199 	 * skip this
3200 	 */
3201 	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
3202 	    mptcp_send_head(sk))
3203 		return;
3204 
3205 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3206 
3207 	mptcp_for_each_subflow(msk, subflow) {
3208 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
3209 
3210 		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
3211 	}
3212 }
3213 
3214 static void __mptcp_wr_shutdown(struct sock *sk)
3215 {
3216 	struct mptcp_sock *msk = mptcp_sk(sk);
3217 
3218 	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
3219 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
3220 		 !!mptcp_send_head(sk));
3221 
3222 	/* will be ignored by fallback sockets */
3223 	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
3224 	WRITE_ONCE(msk->snd_data_fin_enable, 1);
3225 
3226 	mptcp_check_send_data_fin(sk);
3227 }
3228 
3229 static void __mptcp_destroy_sock(struct sock *sk)
3230 {
3231 	struct mptcp_sock *msk = mptcp_sk(sk);
3232 
3233 	pr_debug("msk=%p\n", msk);
3234 
3235 	might_sleep();
3236 
3237 	mptcp_stop_rtx_timer(sk);
3238 	sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
3239 	msk->pm.status = 0;
3240 	mptcp_release_sched(msk);
3241 
3242 	sk->sk_prot->destroy(sk);
3243 
3244 	sk_stream_kill_queues(sk);
3245 	xfrm_sk_free_policy(sk);
3246 
3247 	sock_put(sk);
3248 }
3249 
3250 void __mptcp_unaccepted_force_close(struct sock *sk)
3251 {
3252 	sock_set_flag(sk, SOCK_DEAD);
3253 	mptcp_do_fastclose(sk);
3254 	__mptcp_destroy_sock(sk);
3255 }
3256 
3257 static __poll_t mptcp_check_readable(struct sock *sk)
3258 {
3259 	return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
3260 }
3261 
3262 static void mptcp_check_listen_stop(struct sock *sk)
3263 {
3264 	struct sock *ssk;
3265 
3266 	if (inet_sk_state_load(sk) != TCP_LISTEN)
3267 		return;
3268 
3269 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3270 	ssk = mptcp_sk(sk)->first;
3271 	if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
3272 		return;
3273 
3274 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
3275 	tcp_set_state(ssk, TCP_CLOSE);
3276 	mptcp_subflow_queue_clean(sk, ssk);
3277 	inet_csk_listen_stop(ssk);
3278 	mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
3279 	release_sock(ssk);
3280 }
3281 
3282 bool __mptcp_close(struct sock *sk, long timeout)
3283 {
3284 	struct mptcp_subflow_context *subflow;
3285 	struct mptcp_sock *msk = mptcp_sk(sk);
3286 	bool do_cancel_work = false;
3287 	int subflows_alive = 0;
3288 
3289 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
3290 
3291 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
3292 		mptcp_check_listen_stop(sk);
3293 		mptcp_set_state(sk, TCP_CLOSE);
3294 		goto cleanup;
3295 	}
3296 
3297 	if (mptcp_data_avail(msk) || timeout < 0) {
3298 		/* If the msk has read data, or the caller explicitly ask it,
3299 		 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
3300 		 */
3301 		mptcp_do_fastclose(sk);
3302 		timeout = 0;
3303 	} else if (mptcp_close_state(sk)) {
3304 		__mptcp_wr_shutdown(sk);
3305 	}
3306 
3307 	sk_stream_wait_close(sk, timeout);
3308 
3309 cleanup:
3310 	/* orphan all the subflows */
3311 	mptcp_for_each_subflow(msk, subflow) {
3312 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3313 		bool slow = lock_sock_fast_nested(ssk);
3314 
3315 		subflows_alive += ssk->sk_state != TCP_CLOSE;
3316 
3317 		/* since the close timeout takes precedence on the fail one,
3318 		 * cancel the latter
3319 		 */
3320 		if (ssk == msk->first)
3321 			subflow->fail_tout = 0;
3322 
3323 		/* detach from the parent socket, but allow data_ready to
3324 		 * push incoming data into the mptcp stack, to properly ack it
3325 		 */
3326 		ssk->sk_socket = NULL;
3327 		ssk->sk_wq = NULL;
3328 		unlock_sock_fast(ssk, slow);
3329 	}
3330 	sock_orphan(sk);
3331 
3332 	/* all the subflows are closed, only timeout can change the msk
3333 	 * state, let's not keep resources busy for no reasons
3334 	 */
3335 	if (subflows_alive == 0)
3336 		mptcp_set_state(sk, TCP_CLOSE);
3337 
3338 	sock_hold(sk);
3339 	pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
3340 	mptcp_pm_connection_closed(msk);
3341 
3342 	if (sk->sk_state == TCP_CLOSE) {
3343 		__mptcp_destroy_sock(sk);
3344 		do_cancel_work = true;
3345 	} else {
3346 		mptcp_start_tout_timer(sk);
3347 	}
3348 
3349 	return do_cancel_work;
3350 }
3351 
3352 static void mptcp_close(struct sock *sk, long timeout)
3353 {
3354 	bool do_cancel_work;
3355 
3356 	lock_sock(sk);
3357 
3358 	do_cancel_work = __mptcp_close(sk, timeout);
3359 	release_sock(sk);
3360 	if (do_cancel_work)
3361 		mptcp_cancel_work(sk);
3362 
3363 	sock_put(sk);
3364 }
3365 
3366 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
3367 {
3368 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3369 	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
3370 	struct ipv6_pinfo *msk6 = inet6_sk(msk);
3371 
3372 	msk->sk_v6_daddr = ssk->sk_v6_daddr;
3373 	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
3374 
3375 	if (msk6 && ssk6) {
3376 		msk6->saddr = ssk6->saddr;
3377 		msk6->flow_label = ssk6->flow_label;
3378 	}
3379 #endif
3380 
3381 	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
3382 	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
3383 	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
3384 	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
3385 	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
3386 	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
3387 }
3388 
3389 static void mptcp_destroy_common(struct mptcp_sock *msk)
3390 {
3391 	struct mptcp_subflow_context *subflow, *tmp;
3392 	struct sock *sk = (struct sock *)msk;
3393 
3394 	__mptcp_clear_xmit(sk);
3395 	mptcp_backlog_purge(sk);
3396 
3397 	/* join list will be eventually flushed (with rst) at sock lock release time */
3398 	mptcp_for_each_subflow_safe(msk, subflow, tmp)
3399 		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
3400 
3401 	__skb_queue_purge(&sk->sk_receive_queue);
3402 	skb_rbtree_purge(&msk->out_of_order_queue);
3403 
3404 	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
3405 	 * inet_sock_destruct() will dispose it
3406 	 */
3407 	mptcp_token_destroy(msk);
3408 	mptcp_pm_destroy(msk);
3409 }
3410 
3411 static int mptcp_disconnect(struct sock *sk, int flags)
3412 {
3413 	struct mptcp_sock *msk = mptcp_sk(sk);
3414 
3415 	/* We are on the fastopen error path. We can't call straight into the
3416 	 * subflows cleanup code due to lock nesting (we are already under
3417 	 * msk->firstsocket lock).
3418 	 */
3419 	if (msk->fastopening)
3420 		return -EBUSY;
3421 
3422 	mptcp_check_listen_stop(sk);
3423 	mptcp_set_state(sk, TCP_CLOSE);
3424 
3425 	mptcp_stop_rtx_timer(sk);
3426 	mptcp_stop_tout_timer(sk);
3427 
3428 	mptcp_pm_connection_closed(msk);
3429 
3430 	/* msk->subflow is still intact, the following will not free the first
3431 	 * subflow
3432 	 */
3433 	mptcp_do_fastclose(sk);
3434 	mptcp_destroy_common(msk);
3435 
3436 	/* The first subflow is already in TCP_CLOSE status, the following
3437 	 * can't overlap with a fallback anymore
3438 	 */
3439 	spin_lock_bh(&msk->fallback_lock);
3440 	msk->allow_subflows = true;
3441 	msk->allow_infinite_fallback = true;
3442 	WRITE_ONCE(msk->flags, 0);
3443 	spin_unlock_bh(&msk->fallback_lock);
3444 
3445 	msk->cb_flags = 0;
3446 	msk->recovery = false;
3447 	WRITE_ONCE(msk->can_ack, false);
3448 	WRITE_ONCE(msk->fully_established, false);
3449 	WRITE_ONCE(msk->rcv_data_fin, false);
3450 	WRITE_ONCE(msk->snd_data_fin_enable, false);
3451 	WRITE_ONCE(msk->rcv_fastclose, false);
3452 	WRITE_ONCE(msk->use_64bit_ack, false);
3453 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3454 	mptcp_pm_data_reset(msk);
3455 	mptcp_ca_reset(sk);
3456 	msk->bytes_consumed = 0;
3457 	msk->bytes_acked = 0;
3458 	msk->bytes_received = 0;
3459 	msk->bytes_sent = 0;
3460 	msk->bytes_retrans = 0;
3461 	msk->rcvspace_init = 0;
3462 	msk->fastclosing = 0;
3463 
3464 	/* for fallback's sake */
3465 	WRITE_ONCE(msk->ack_seq, 0);
3466 
3467 	WRITE_ONCE(sk->sk_shutdown, 0);
3468 	sk_error_report(sk);
3469 	return 0;
3470 }
3471 
3472 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3473 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
3474 {
3475 	struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk);
3476 
3477 	return &msk6->np;
3478 }
3479 
3480 static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
3481 {
3482 	const struct ipv6_pinfo *np = inet6_sk(sk);
3483 	struct ipv6_txoptions *opt;
3484 	struct ipv6_pinfo *newnp;
3485 
3486 	newnp = inet6_sk(newsk);
3487 
3488 	rcu_read_lock();
3489 	opt = rcu_dereference(np->opt);
3490 	if (opt) {
3491 		opt = ipv6_dup_options(newsk, opt);
3492 		if (!opt)
3493 			net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
3494 	}
3495 	RCU_INIT_POINTER(newnp->opt, opt);
3496 	rcu_read_unlock();
3497 }
3498 #endif
3499 
3500 static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
3501 {
3502 	struct ip_options_rcu *inet_opt, *newopt = NULL;
3503 	const struct inet_sock *inet = inet_sk(sk);
3504 	struct inet_sock *newinet;
3505 
3506 	newinet = inet_sk(newsk);
3507 
3508 	rcu_read_lock();
3509 	inet_opt = rcu_dereference(inet->inet_opt);
3510 	if (inet_opt) {
3511 		newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) +
3512 				      inet_opt->opt.optlen, GFP_ATOMIC);
3513 		if (!newopt)
3514 			net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
3515 	}
3516 	RCU_INIT_POINTER(newinet->inet_opt, newopt);
3517 	rcu_read_unlock();
3518 }
3519 
3520 struct sock *mptcp_sk_clone_init(const struct sock *sk,
3521 				 const struct mptcp_options_received *mp_opt,
3522 				 struct sock *ssk,
3523 				 struct request_sock *req)
3524 {
3525 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
3526 	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
3527 	struct mptcp_subflow_context *subflow;
3528 	struct mptcp_sock *msk;
3529 
3530 	if (!nsk)
3531 		return NULL;
3532 
3533 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3534 	if (nsk->sk_family == AF_INET6)
3535 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
3536 #endif
3537 
3538 	__mptcp_init_sock(nsk);
3539 
3540 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3541 	if (nsk->sk_family == AF_INET6)
3542 		mptcp_copy_ip6_options(nsk, sk);
3543 	else
3544 #endif
3545 		mptcp_copy_ip_options(nsk, sk);
3546 
3547 	msk = mptcp_sk(nsk);
3548 	WRITE_ONCE(msk->local_key, subflow_req->local_key);
3549 	WRITE_ONCE(msk->token, subflow_req->token);
3550 	msk->in_accept_queue = 1;
3551 	WRITE_ONCE(msk->fully_established, false);
3552 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
3553 		WRITE_ONCE(msk->csum_enabled, true);
3554 
3555 	WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
3556 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3557 	WRITE_ONCE(msk->snd_una, msk->write_seq);
3558 	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
3559 	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
3560 	mptcp_init_sched(msk, mptcp_sk(sk)->sched);
3561 
3562 	/* passive msk is created after the first/MPC subflow */
3563 	msk->subflow_id = 2;
3564 
3565 	sock_reset_flag(nsk, SOCK_RCU_FREE);
3566 	security_inet_csk_clone(nsk, req);
3567 
3568 	/* this can't race with mptcp_close(), as the msk is
3569 	 * not yet exposted to user-space
3570 	 */
3571 	mptcp_set_state(nsk, TCP_ESTABLISHED);
3572 
3573 	/* The msk maintain a ref to each subflow in the connections list */
3574 	WRITE_ONCE(msk->first, ssk);
3575 	subflow = mptcp_subflow_ctx(ssk);
3576 	list_add(&subflow->node, &msk->conn_list);
3577 	sock_hold(ssk);
3578 
3579 	/* new mpc subflow takes ownership of the newly
3580 	 * created mptcp socket
3581 	 */
3582 	mptcp_token_accept(subflow_req, msk);
3583 
3584 	/* set msk addresses early to ensure mptcp_pm_get_local_id()
3585 	 * uses the correct data
3586 	 */
3587 	mptcp_copy_inaddrs(nsk, ssk);
3588 	__mptcp_propagate_sndbuf(nsk, ssk);
3589 
3590 	mptcp_rcv_space_init(msk, ssk);
3591 	msk->rcvq_space.time = mptcp_stamp();
3592 
3593 	if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
3594 		__mptcp_subflow_fully_established(msk, subflow, mp_opt);
3595 	bh_unlock_sock(nsk);
3596 
3597 	/* note: the newly allocated socket refcount is 2 now */
3598 	return nsk;
3599 }
3600 
3601 static void mptcp_destroy(struct sock *sk)
3602 {
3603 	struct mptcp_sock *msk = mptcp_sk(sk);
3604 
3605 	/* allow the following to close even the initial subflow */
3606 	msk->free_first = 1;
3607 	mptcp_destroy_common(msk);
3608 	sk_sockets_allocated_dec(sk);
3609 }
3610 
3611 void __mptcp_data_acked(struct sock *sk)
3612 {
3613 	if (!sock_owned_by_user(sk))
3614 		__mptcp_clean_una(sk);
3615 	else
3616 		__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
3617 }
3618 
3619 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3620 {
3621 	if (!sock_owned_by_user(sk))
3622 		__mptcp_subflow_push_pending(sk, ssk, false);
3623 	else
3624 		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3625 }
3626 
3627 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3628 				      BIT(MPTCP_RETRANSMIT) | \
3629 				      BIT(MPTCP_FLUSH_JOIN_LIST))
3630 
3631 /* processes deferred events and flush wmem */
3632 static void mptcp_release_cb(struct sock *sk)
3633 	__must_hold(&sk->sk_lock.slock)
3634 {
3635 	struct mptcp_sock *msk = mptcp_sk(sk);
3636 
3637 	for (;;) {
3638 		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
3639 		struct list_head join_list, skbs;
3640 		bool spool_bl;
3641 		u32 moved;
3642 
3643 		spool_bl = mptcp_can_spool_backlog(sk, &skbs);
3644 		if (!flags && !spool_bl)
3645 			break;
3646 
3647 		INIT_LIST_HEAD(&join_list);
3648 		list_splice_init(&msk->join_list, &join_list);
3649 
3650 		/* the following actions acquire the subflow socket lock
3651 		 *
3652 		 * 1) can't be invoked in atomic scope
3653 		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3654 		 *    datapath acquires the msk socket spinlock while helding
3655 		 *    the subflow socket lock
3656 		 */
3657 		msk->cb_flags &= ~flags;
3658 		spin_unlock_bh(&sk->sk_lock.slock);
3659 
3660 		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
3661 			__mptcp_flush_join_list(sk, &join_list);
3662 		if (flags & BIT(MPTCP_PUSH_PENDING))
3663 			__mptcp_push_pending(sk, 0);
3664 		if (flags & BIT(MPTCP_RETRANSMIT))
3665 			__mptcp_retrans(sk);
3666 		if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
3667 			/* notify ack seq update */
3668 			mptcp_cleanup_rbuf(msk, 0);
3669 			sk->sk_data_ready(sk);
3670 		}
3671 
3672 		cond_resched();
3673 		spin_lock_bh(&sk->sk_lock.slock);
3674 		if (spool_bl)
3675 			mptcp_backlog_spooled(sk, moved, &skbs);
3676 	}
3677 
3678 	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3679 		__mptcp_clean_una_wakeup(sk);
3680 	if (unlikely(msk->cb_flags)) {
3681 		/* be sure to sync the msk state before taking actions
3682 		 * depending on sk_state (MPTCP_ERROR_REPORT)
3683 		 * On sk release avoid actions depending on the first subflow
3684 		 */
3685 		if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
3686 			__mptcp_sync_state(sk, msk->pending_state);
3687 		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3688 			__mptcp_error_report(sk);
3689 		if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
3690 			__mptcp_sync_sndbuf(sk);
3691 	}
3692 }
3693 
3694 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3695  * TCP can't schedule delack timer before the subflow is fully established.
3696  * MPTCP uses the delack timer to do 3rd ack retransmissions
3697  */
3698 static void schedule_3rdack_retransmission(struct sock *ssk)
3699 {
3700 	struct inet_connection_sock *icsk = inet_csk(ssk);
3701 	struct tcp_sock *tp = tcp_sk(ssk);
3702 	unsigned long timeout;
3703 
3704 	if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
3705 		return;
3706 
3707 	/* reschedule with a timeout above RTT, as we must look only for drop */
3708 	if (tp->srtt_us)
3709 		timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
3710 	else
3711 		timeout = TCP_TIMEOUT_INIT;
3712 	timeout += jiffies;
3713 
3714 	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
3715 	smp_store_release(&icsk->icsk_ack.pending,
3716 			  icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
3717 	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3718 }
3719 
3720 void mptcp_subflow_process_delegated(struct sock *ssk, long status)
3721 {
3722 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3723 	struct sock *sk = subflow->conn;
3724 
3725 	if (status & BIT(MPTCP_DELEGATE_SEND)) {
3726 		mptcp_data_lock(sk);
3727 		if (!sock_owned_by_user(sk))
3728 			__mptcp_subflow_push_pending(sk, ssk, true);
3729 		else
3730 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3731 		mptcp_data_unlock(sk);
3732 	}
3733 	if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
3734 		mptcp_data_lock(sk);
3735 		if (!sock_owned_by_user(sk))
3736 			__mptcp_sync_sndbuf(sk);
3737 		else
3738 			__set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
3739 		mptcp_data_unlock(sk);
3740 	}
3741 	if (status & BIT(MPTCP_DELEGATE_ACK))
3742 		schedule_3rdack_retransmission(ssk);
3743 }
3744 
3745 static int mptcp_hash(struct sock *sk)
3746 {
3747 	/* should never be called,
3748 	 * we hash the TCP subflows not the MPTCP socket
3749 	 */
3750 	WARN_ON_ONCE(1);
3751 	return 0;
3752 }
3753 
3754 static void mptcp_unhash(struct sock *sk)
3755 {
3756 	/* called from sk_common_release(), but nothing to do here */
3757 }
3758 
3759 static int mptcp_get_port(struct sock *sk, unsigned short snum)
3760 {
3761 	struct mptcp_sock *msk = mptcp_sk(sk);
3762 
3763 	pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
3764 	if (WARN_ON_ONCE(!msk->first))
3765 		return -EINVAL;
3766 
3767 	return inet_csk_get_port(msk->first, snum);
3768 }
3769 
3770 void mptcp_finish_connect(struct sock *ssk)
3771 {
3772 	struct mptcp_subflow_context *subflow;
3773 	struct mptcp_sock *msk;
3774 	struct sock *sk;
3775 
3776 	subflow = mptcp_subflow_ctx(ssk);
3777 	sk = subflow->conn;
3778 	msk = mptcp_sk(sk);
3779 
3780 	pr_debug("msk=%p, token=%u\n", sk, subflow->token);
3781 
3782 	subflow->map_seq = subflow->iasn;
3783 	subflow->map_subflow_seq = 1;
3784 
3785 	/* the socket is not connected yet, no msk/subflow ops can access/race
3786 	 * accessing the field below
3787 	 */
3788 	WRITE_ONCE(msk->local_key, subflow->local_key);
3789 	WRITE_ONCE(msk->rcvq_space.time, mptcp_stamp());
3790 
3791 	mptcp_pm_new_connection(msk, ssk, 0);
3792 }
3793 
3794 void mptcp_sock_graft(struct sock *sk, struct socket *parent)
3795 {
3796 	write_lock_bh(&sk->sk_callback_lock);
3797 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
3798 	sk_set_socket(sk, parent);
3799 	write_unlock_bh(&sk->sk_callback_lock);
3800 }
3801 
3802 /* Can be called without holding the msk socket lock; use the callback lock
3803  * to avoid {READ_,WRITE_}ONCE annotations on sk_socket.
3804  */
3805 static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
3806 {
3807 	struct socket *sock;
3808 
3809 	write_lock_bh(&sk->sk_callback_lock);
3810 	sock = sk->sk_socket;
3811 	write_unlock_bh(&sk->sk_callback_lock);
3812 	if (sock) {
3813 		mptcp_sock_graft(ssk, sock);
3814 		__mptcp_inherit_cgrp_data(sk, ssk);
3815 		__mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC);
3816 	}
3817 }
3818 
3819 bool mptcp_finish_join(struct sock *ssk)
3820 {
3821 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3822 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3823 	struct sock *parent = (void *)msk;
3824 	bool ret = true;
3825 
3826 	pr_debug("msk=%p, subflow=%p\n", msk, subflow);
3827 
3828 	/* mptcp socket already closing? */
3829 	if (!mptcp_is_fully_established(parent)) {
3830 		subflow->reset_reason = MPTCP_RST_EMPTCP;
3831 		return false;
3832 	}
3833 
3834 	/* Active subflow, already present inside the conn_list; is grafted
3835 	 * either by __mptcp_subflow_connect() or accept.
3836 	 */
3837 	if (!list_empty(&subflow->node)) {
3838 		spin_lock_bh(&msk->fallback_lock);
3839 		if (!msk->allow_subflows) {
3840 			spin_unlock_bh(&msk->fallback_lock);
3841 			return false;
3842 		}
3843 		mptcp_subflow_joined(msk, ssk);
3844 		spin_unlock_bh(&msk->fallback_lock);
3845 		mptcp_propagate_sndbuf(parent, ssk);
3846 		return true;
3847 	}
3848 
3849 	if (!mptcp_pm_allow_new_subflow(msk)) {
3850 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED);
3851 		goto err_prohibited;
3852 	}
3853 
3854 	/* If we can't acquire msk socket lock here, let the release callback
3855 	 * handle it
3856 	 */
3857 	mptcp_data_lock(parent);
3858 	if (!sock_owned_by_user(parent)) {
3859 		ret = __mptcp_finish_join(msk, ssk);
3860 		if (ret) {
3861 			sock_hold(ssk);
3862 			list_add_tail(&subflow->node, &msk->conn_list);
3863 			mptcp_sock_check_graft(parent, ssk);
3864 		}
3865 	} else {
3866 		sock_hold(ssk);
3867 		list_add_tail(&subflow->node, &msk->join_list);
3868 		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
3869 
3870 		/* In case of later failures, __mptcp_flush_join_list() will
3871 		 * properly orphan the ssk via mptcp_close_ssk().
3872 		 */
3873 		mptcp_sock_check_graft(parent, ssk);
3874 	}
3875 	mptcp_data_unlock(parent);
3876 
3877 	if (!ret) {
3878 err_prohibited:
3879 		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
3880 		return false;
3881 	}
3882 
3883 	return true;
3884 }
3885 
3886 static void mptcp_shutdown(struct sock *sk, int how)
3887 {
3888 	pr_debug("sk=%p, how=%d\n", sk, how);
3889 
3890 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
3891 		__mptcp_wr_shutdown(sk);
3892 }
3893 
3894 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
3895 {
3896 	const struct sock *sk = (void *)msk;
3897 	u64 delta;
3898 
3899 	if (sk->sk_state == TCP_LISTEN)
3900 		return -EINVAL;
3901 
3902 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
3903 		return 0;
3904 
3905 	delta = msk->write_seq - v;
3906 	if (__mptcp_check_fallback(msk) && msk->first) {
3907 		struct tcp_sock *tp = tcp_sk(msk->first);
3908 
3909 		/* the first subflow is disconnected after close - see
3910 		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
3911 		 * so ignore that status, too.
3912 		 */
3913 		if (!((1 << msk->first->sk_state) &
3914 		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
3915 			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
3916 	}
3917 	if (delta > INT_MAX)
3918 		delta = INT_MAX;
3919 
3920 	return (int)delta;
3921 }
3922 
3923 static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
3924 {
3925 	struct mptcp_sock *msk = mptcp_sk(sk);
3926 	bool slow;
3927 
3928 	switch (cmd) {
3929 	case SIOCINQ:
3930 		if (sk->sk_state == TCP_LISTEN)
3931 			return -EINVAL;
3932 
3933 		lock_sock(sk);
3934 		if (mptcp_move_skbs(sk))
3935 			mptcp_cleanup_rbuf(msk, 0);
3936 		*karg = mptcp_inq_hint(sk);
3937 		release_sock(sk);
3938 		break;
3939 	case SIOCOUTQ:
3940 		slow = lock_sock_fast(sk);
3941 		*karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
3942 		unlock_sock_fast(sk, slow);
3943 		break;
3944 	case SIOCOUTQNSD:
3945 		slow = lock_sock_fast(sk);
3946 		*karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
3947 		unlock_sock_fast(sk, slow);
3948 		break;
3949 	default:
3950 		return -ENOIOCTLCMD;
3951 	}
3952 
3953 	return 0;
3954 }
3955 
3956 static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
3957 			 int addr_len)
3958 {
3959 	struct mptcp_subflow_context *subflow;
3960 	struct mptcp_sock *msk = mptcp_sk(sk);
3961 	int err = -EINVAL;
3962 	struct sock *ssk;
3963 
3964 	ssk = __mptcp_nmpc_sk(msk);
3965 	if (IS_ERR(ssk))
3966 		return PTR_ERR(ssk);
3967 
3968 	mptcp_set_state(sk, TCP_SYN_SENT);
3969 	subflow = mptcp_subflow_ctx(ssk);
3970 #ifdef CONFIG_TCP_MD5SIG
3971 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3972 	 * TCP option space.
3973 	 */
3974 	if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
3975 		mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK);
3976 #endif
3977 	if (subflow->request_mptcp) {
3978 		if (mptcp_active_should_disable(sk))
3979 			mptcp_early_fallback(msk, subflow,
3980 					     MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
3981 		else if (mptcp_token_new_connect(ssk) < 0)
3982 			mptcp_early_fallback(msk, subflow,
3983 					     MPTCP_MIB_TOKENFALLBACKINIT);
3984 	}
3985 
3986 	WRITE_ONCE(msk->write_seq, subflow->idsn);
3987 	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
3988 	WRITE_ONCE(msk->snd_una, subflow->idsn);
3989 	if (likely(!__mptcp_check_fallback(msk)))
3990 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
3991 
3992 	/* if reaching here via the fastopen/sendmsg path, the caller already
3993 	 * acquired the subflow socket lock, too.
3994 	 */
3995 	if (!msk->fastopening)
3996 		lock_sock(ssk);
3997 
3998 	/* the following mirrors closely a very small chunk of code from
3999 	 * __inet_stream_connect()
4000 	 */
4001 	if (ssk->sk_state != TCP_CLOSE)
4002 		goto out;
4003 
4004 	if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
4005 		err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
4006 		if (err)
4007 			goto out;
4008 	}
4009 
4010 	err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
4011 	if (err < 0)
4012 		goto out;
4013 
4014 	inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));
4015 
4016 out:
4017 	if (!msk->fastopening)
4018 		release_sock(ssk);
4019 
4020 	/* on successful connect, the msk state will be moved to established by
4021 	 * subflow_finish_connect()
4022 	 */
4023 	if (unlikely(err)) {
4024 		/* avoid leaving a dangling token in an unconnected socket */
4025 		mptcp_token_destroy(msk);
4026 		mptcp_set_state(sk, TCP_CLOSE);
4027 		return err;
4028 	}
4029 
4030 	mptcp_copy_inaddrs(sk, ssk);
4031 	return 0;
4032 }
4033 
4034 static struct proto mptcp_prot = {
4035 	.name		= "MPTCP",
4036 	.owner		= THIS_MODULE,
4037 	.init		= mptcp_init_sock,
4038 	.connect	= mptcp_connect,
4039 	.disconnect	= mptcp_disconnect,
4040 	.close		= mptcp_close,
4041 	.setsockopt	= mptcp_setsockopt,
4042 	.getsockopt	= mptcp_getsockopt,
4043 	.shutdown	= mptcp_shutdown,
4044 	.destroy	= mptcp_destroy,
4045 	.sendmsg	= mptcp_sendmsg,
4046 	.ioctl		= mptcp_ioctl,
4047 	.recvmsg	= mptcp_recvmsg,
4048 	.release_cb	= mptcp_release_cb,
4049 	.hash		= mptcp_hash,
4050 	.unhash		= mptcp_unhash,
4051 	.get_port	= mptcp_get_port,
4052 	.stream_memory_free	= mptcp_stream_memory_free,
4053 	.sockets_allocated	= &mptcp_sockets_allocated,
4054 
4055 	.memory_allocated	= &net_aligned_data.tcp_memory_allocated,
4056 	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
4057 
4058 	.memory_pressure	= &tcp_memory_pressure,
4059 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
4060 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
4061 	.sysctl_mem	= sysctl_tcp_mem,
4062 	.obj_size	= sizeof(struct mptcp_sock),
4063 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
4064 	.no_autobind	= true,
4065 };
4066 
4067 static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
4068 {
4069 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
4070 	struct sock *ssk, *sk = sock->sk;
4071 	int err = -EINVAL;
4072 
4073 	lock_sock(sk);
4074 	ssk = __mptcp_nmpc_sk(msk);
4075 	if (IS_ERR(ssk)) {
4076 		err = PTR_ERR(ssk);
4077 		goto unlock;
4078 	}
4079 
4080 	if (sk->sk_family == AF_INET)
4081 		err = inet_bind_sk(ssk, uaddr, addr_len);
4082 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4083 	else if (sk->sk_family == AF_INET6)
4084 		err = inet6_bind_sk(ssk, uaddr, addr_len);
4085 #endif
4086 	if (!err)
4087 		mptcp_copy_inaddrs(sk, ssk);
4088 
4089 unlock:
4090 	release_sock(sk);
4091 	return err;
4092 }
4093 
4094 static int mptcp_listen(struct socket *sock, int backlog)
4095 {
4096 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
4097 	struct sock *sk = sock->sk;
4098 	struct sock *ssk;
4099 	int err;
4100 
4101 	pr_debug("msk=%p\n", msk);
4102 
4103 	lock_sock(sk);
4104 
4105 	err = -EINVAL;
4106 	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
4107 		goto unlock;
4108 
4109 	ssk = __mptcp_nmpc_sk(msk);
4110 	if (IS_ERR(ssk)) {
4111 		err = PTR_ERR(ssk);
4112 		goto unlock;
4113 	}
4114 
4115 	mptcp_set_state(sk, TCP_LISTEN);
4116 	sock_set_flag(sk, SOCK_RCU_FREE);
4117 
4118 	lock_sock(ssk);
4119 	err = __inet_listen_sk(ssk, backlog);
4120 	release_sock(ssk);
4121 	mptcp_set_state(sk, inet_sk_state_load(ssk));
4122 
4123 	if (!err) {
4124 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
4125 		mptcp_copy_inaddrs(sk, ssk);
4126 		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
4127 	}
4128 
4129 unlock:
4130 	release_sock(sk);
4131 	return err;
4132 }
4133 
4134 static void mptcp_graft_subflows(struct sock *sk)
4135 {
4136 	struct mptcp_subflow_context *subflow;
4137 	struct mptcp_sock *msk = mptcp_sk(sk);
4138 
4139 	if (mem_cgroup_sockets_enabled) {
4140 		LIST_HEAD(join_list);
4141 
4142 		/* Subflows joining after __inet_accept() will get the
4143 		 * mem CG properly initialized at mptcp_finish_join() time,
4144 		 * but subflows pending in join_list need explicit
4145 		 * initialization before flushing `backlog_unaccounted`
4146 		 * or MPTCP can later unexpectedly observe unaccounted memory.
4147 		 */
4148 		mptcp_data_lock(sk);
4149 		list_splice_init(&msk->join_list, &join_list);
4150 		mptcp_data_unlock(sk);
4151 
4152 		__mptcp_flush_join_list(sk, &join_list);
4153 	}
4154 
4155 	mptcp_for_each_subflow(msk, subflow) {
4156 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4157 
4158 		lock_sock(ssk);
4159 
4160 		/* Set ssk->sk_socket of accept()ed flows to mptcp socket.
4161 		 * This is needed so NOSPACE flag can be set from tcp stack.
4162 		 */
4163 		if (!ssk->sk_socket)
4164 			mptcp_sock_graft(ssk, sk->sk_socket);
4165 
4166 		if (!mem_cgroup_sk_enabled(sk))
4167 			goto unlock;
4168 
4169 		__mptcp_inherit_cgrp_data(sk, ssk);
4170 		__mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);
4171 
4172 unlock:
4173 		release_sock(ssk);
4174 	}
4175 
4176 	if (mem_cgroup_sk_enabled(sk)) {
4177 		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
4178 		int amt;
4179 
4180 		/* Account the backlog memory; prior accept() is aware of
4181 		 * fwd and rmem only.
4182 		 */
4183 		mptcp_data_lock(sk);
4184 		amt = sk_mem_pages(sk->sk_forward_alloc +
4185 				   msk->backlog_unaccounted +
4186 				   atomic_read(&sk->sk_rmem_alloc)) -
4187 		      sk_mem_pages(sk->sk_forward_alloc +
4188 				   atomic_read(&sk->sk_rmem_alloc));
4189 		msk->backlog_unaccounted = 0;
4190 		mptcp_data_unlock(sk);
4191 
4192 		if (amt)
4193 			mem_cgroup_sk_charge(sk, amt, gfp);
4194 	}
4195 }
4196 
4197 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
4198 			       struct proto_accept_arg *arg)
4199 {
4200 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
4201 	struct sock *ssk, *newsk;
4202 
4203 	pr_debug("msk=%p\n", msk);
4204 
4205 	/* Buggy applications can call accept on socket states other then LISTEN
4206 	 * but no need to allocate the first subflow just to error out.
4207 	 */
4208 	ssk = READ_ONCE(msk->first);
4209 	if (!ssk)
4210 		return -EINVAL;
4211 
4212 	pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
4213 	newsk = inet_csk_accept(ssk, arg);
4214 	if (!newsk)
4215 		return arg->err;
4216 
4217 	pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
4218 	if (sk_is_mptcp(newsk)) {
4219 		struct mptcp_subflow_context *subflow;
4220 		struct sock *new_mptcp_sock;
4221 
4222 		subflow = mptcp_subflow_ctx(newsk);
4223 		new_mptcp_sock = subflow->conn;
4224 
4225 		/* is_mptcp should be false if subflow->conn is missing, see
4226 		 * subflow_syn_recv_sock()
4227 		 */
4228 		if (WARN_ON_ONCE(!new_mptcp_sock)) {
4229 			tcp_sk(newsk)->is_mptcp = 0;
4230 			goto tcpfallback;
4231 		}
4232 
4233 		newsk = new_mptcp_sock;
4234 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
4235 
4236 		newsk->sk_kern_sock = arg->kern;
4237 		lock_sock(newsk);
4238 		__inet_accept(sock, newsock, newsk);
4239 
4240 		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
4241 		msk = mptcp_sk(newsk);
4242 		msk->in_accept_queue = 0;
4243 
4244 		mptcp_graft_subflows(newsk);
4245 		mptcp_rps_record_subflows(msk);
4246 
4247 		/* Do late cleanup for the first subflow as necessary. Also
4248 		 * deal with bad peers not doing a complete shutdown.
4249 		 */
4250 		if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
4251 			if (unlikely(list_is_singular(&msk->conn_list)))
4252 				mptcp_set_state(newsk, TCP_CLOSE);
4253 			mptcp_close_ssk(newsk, msk->first,
4254 					mptcp_subflow_ctx(msk->first));
4255 		}
4256 	} else {
4257 tcpfallback:
4258 		newsk->sk_kern_sock = arg->kern;
4259 		lock_sock(newsk);
4260 		__inet_accept(sock, newsock, newsk);
4261 		/* we are being invoked after accepting a non-mp-capable
4262 		 * flow: sk is a tcp_sk, not an mptcp one.
4263 		 *
4264 		 * Hand the socket over to tcp so all further socket ops
4265 		 * bypass mptcp.
4266 		 */
4267 		WRITE_ONCE(newsock->sk->sk_socket->ops,
4268 			   mptcp_fallback_tcp_ops(newsock->sk));
4269 	}
4270 	release_sock(newsk);
4271 
4272 	return 0;
4273 }
4274 
4275 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
4276 {
4277 	struct sock *sk = (struct sock *)msk;
4278 
4279 	if (__mptcp_stream_is_writeable(sk, 1))
4280 		return EPOLLOUT | EPOLLWRNORM;
4281 
4282 	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
4283 	smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
4284 	if (__mptcp_stream_is_writeable(sk, 1))
4285 		return EPOLLOUT | EPOLLWRNORM;
4286 
4287 	return 0;
4288 }
4289 
4290 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
4291 			   struct poll_table_struct *wait)
4292 {
4293 	struct sock *sk = sock->sk;
4294 	struct mptcp_sock *msk;
4295 	__poll_t mask = 0;
4296 	u8 shutdown;
4297 	int state;
4298 
4299 	msk = mptcp_sk(sk);
4300 	sock_poll_wait(file, sock, wait);
4301 
4302 	state = inet_sk_state_load(sk);
4303 	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
4304 	if (state == TCP_LISTEN) {
4305 		struct sock *ssk = READ_ONCE(msk->first);
4306 
4307 		if (WARN_ON_ONCE(!ssk))
4308 			return 0;
4309 
4310 		return inet_csk_listen_poll(ssk);
4311 	}
4312 
4313 	shutdown = READ_ONCE(sk->sk_shutdown);
4314 	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
4315 		mask |= EPOLLHUP;
4316 	if (shutdown & RCV_SHUTDOWN)
4317 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
4318 
4319 	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
4320 		mask |= mptcp_check_readable(sk);
4321 		if (shutdown & SEND_SHUTDOWN)
4322 			mask |= EPOLLOUT | EPOLLWRNORM;
4323 		else
4324 			mask |= mptcp_check_writeable(msk);
4325 	} else if (state == TCP_SYN_SENT &&
4326 		   inet_test_bit(DEFER_CONNECT, sk)) {
4327 		/* cf tcp_poll() note about TFO */
4328 		mask |= EPOLLOUT | EPOLLWRNORM;
4329 	}
4330 
4331 	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
4332 	smp_rmb();
4333 	if (READ_ONCE(sk->sk_err))
4334 		mask |= EPOLLERR;
4335 
4336 	return mask;
4337 }
4338 
4339 static struct sk_buff *mptcp_recv_skb(struct sock *sk, u32 *off)
4340 {
4341 	struct mptcp_sock *msk = mptcp_sk(sk);
4342 	struct sk_buff *skb;
4343 	u32 offset;
4344 
4345 	if (!list_empty(&msk->backlog_list))
4346 		mptcp_move_skbs(sk);
4347 
4348 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
4349 		offset = MPTCP_SKB_CB(skb)->offset;
4350 		if (offset < skb->len) {
4351 			*off = offset;
4352 			return skb;
4353 		}
4354 		mptcp_eat_recv_skb(sk, skb);
4355 	}
4356 	return NULL;
4357 }
4358 
4359 /*
4360  * Note:
4361  *	- It is assumed that the socket was locked by the caller.
4362  */
4363 static int __mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
4364 			     sk_read_actor_t recv_actor, bool noack)
4365 {
4366 	struct mptcp_sock *msk = mptcp_sk(sk);
4367 	struct sk_buff *skb;
4368 	int copied = 0;
4369 	u32 offset;
4370 
4371 	msk_owned_by_me(msk);
4372 
4373 	if (sk->sk_state == TCP_LISTEN)
4374 		return -ENOTCONN;
4375 	while ((skb = mptcp_recv_skb(sk, &offset)) != NULL) {
4376 		u32 data_len = skb->len - offset;
4377 		int count;
4378 		u32 size;
4379 
4380 		size = min_t(size_t, data_len, INT_MAX);
4381 		count = recv_actor(desc, skb, offset, size);
4382 		if (count <= 0) {
4383 			if (!copied)
4384 				copied = count;
4385 			break;
4386 		}
4387 
4388 		copied += count;
4389 
4390 		msk->bytes_consumed += count;
4391 		if (count < data_len) {
4392 			MPTCP_SKB_CB(skb)->offset += count;
4393 			MPTCP_SKB_CB(skb)->map_seq += count;
4394 			break;
4395 		}
4396 
4397 		mptcp_eat_recv_skb(sk, skb);
4398 	}
4399 
4400 	if (noack)
4401 		goto out;
4402 
4403 	mptcp_rcv_space_adjust(msk, copied);
4404 
4405 	if (copied > 0) {
4406 		mptcp_recv_skb(sk, &offset);
4407 		mptcp_cleanup_rbuf(msk, copied);
4408 	}
4409 out:
4410 	return copied;
4411 }
4412 
4413 static int mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
4414 			   sk_read_actor_t recv_actor)
4415 {
4416 	return __mptcp_read_sock(sk, desc, recv_actor, false);
4417 }
4418 
4419 static int __mptcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
4420 {
4421 	/* Store TCP splice context information in read_descriptor_t. */
4422 	read_descriptor_t rd_desc = {
4423 		.arg.data = tss,
4424 		.count	  = tss->len,
4425 	};
4426 
4427 	return mptcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
4428 }
4429 
4430 /**
4431  *  mptcp_splice_read - splice data from MPTCP socket to a pipe
4432  * @sock:	socket to splice from
4433  * @ppos:	position (not valid)
4434  * @pipe:	pipe to splice to
4435  * @len:	number of bytes to splice
4436  * @flags:	splice modifier flags
4437  *
4438  * Description:
4439  *    Will read pages from given socket and fill them into a pipe.
4440  *
4441  * Return:
4442  *    Amount of bytes that have been spliced.
4443  *
4444  **/
4445 static ssize_t mptcp_splice_read(struct socket *sock, loff_t *ppos,
4446 				 struct pipe_inode_info *pipe, size_t len,
4447 				 unsigned int flags)
4448 {
4449 	struct tcp_splice_state tss = {
4450 		.pipe	= pipe,
4451 		.len	= len,
4452 		.flags	= flags,
4453 	};
4454 	struct sock *sk = sock->sk;
4455 	ssize_t spliced = 0;
4456 	int ret = 0;
4457 	long timeo;
4458 
4459 	/*
4460 	 * We can't seek on a socket input
4461 	 */
4462 	if (unlikely(*ppos))
4463 		return -ESPIPE;
4464 
4465 	lock_sock(sk);
4466 
4467 	mptcp_rps_record_subflows(mptcp_sk(sk));
4468 
4469 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
4470 	while (tss.len) {
4471 		ret = __mptcp_splice_read(sk, &tss);
4472 		if (ret < 0) {
4473 			break;
4474 		} else if (!ret) {
4475 			if (spliced)
4476 				break;
4477 			if (sock_flag(sk, SOCK_DONE))
4478 				break;
4479 			if (sk->sk_err) {
4480 				ret = sock_error(sk);
4481 				break;
4482 			}
4483 			if (sk->sk_shutdown & RCV_SHUTDOWN)
4484 				break;
4485 			if (sk->sk_state == TCP_CLOSE) {
4486 				/*
4487 				 * This occurs when user tries to read
4488 				 * from never connected socket.
4489 				 */
4490 				ret = -ENOTCONN;
4491 				break;
4492 			}
4493 			if (!timeo) {
4494 				ret = -EAGAIN;
4495 				break;
4496 			}
4497 			/* if __mptcp_splice_read() got nothing while we have
4498 			 * an skb in receive queue, we do not want to loop.
4499 			 * This might happen with URG data.
4500 			 */
4501 			if (!skb_queue_empty(&sk->sk_receive_queue))
4502 				break;
4503 			ret = sk_wait_data(sk, &timeo, NULL);
4504 			if (ret < 0)
4505 				break;
4506 			if (signal_pending(current)) {
4507 				ret = sock_intr_errno(timeo);
4508 				break;
4509 			}
4510 			continue;
4511 		}
4512 		tss.len -= ret;
4513 		spliced += ret;
4514 
4515 		if (!tss.len || !timeo)
4516 			break;
4517 		release_sock(sk);
4518 		lock_sock(sk);
4519 
4520 		if (tcp_recv_should_stop(sk))
4521 			break;
4522 	}
4523 
4524 	release_sock(sk);
4525 
4526 	if (spliced)
4527 		return spliced;
4528 
4529 	return ret;
4530 }
4531 
4532 static const struct proto_ops mptcp_stream_ops = {
4533 	.family		   = PF_INET,
4534 	.owner		   = THIS_MODULE,
4535 	.release	   = inet_release,
4536 	.bind		   = mptcp_bind,
4537 	.connect	   = inet_stream_connect,
4538 	.socketpair	   = sock_no_socketpair,
4539 	.accept		   = mptcp_stream_accept,
4540 	.getname	   = inet_getname,
4541 	.poll		   = mptcp_poll,
4542 	.ioctl		   = inet_ioctl,
4543 	.gettstamp	   = sock_gettstamp,
4544 	.listen		   = mptcp_listen,
4545 	.shutdown	   = inet_shutdown,
4546 	.setsockopt	   = sock_common_setsockopt,
4547 	.getsockopt	   = sock_common_getsockopt,
4548 	.sendmsg	   = inet_sendmsg,
4549 	.recvmsg	   = inet_recvmsg,
4550 	.mmap		   = sock_no_mmap,
4551 	.set_rcvlowat	   = mptcp_set_rcvlowat,
4552 	.read_sock	   = mptcp_read_sock,
4553 	.splice_read	   = mptcp_splice_read,
4554 };
4555 
4556 static struct inet_protosw mptcp_protosw = {
4557 	.type		= SOCK_STREAM,
4558 	.protocol	= IPPROTO_MPTCP,
4559 	.prot		= &mptcp_prot,
4560 	.ops		= &mptcp_stream_ops,
4561 	.flags		= INET_PROTOSW_ICSK,
4562 };
4563 
4564 static int mptcp_napi_poll(struct napi_struct *napi, int budget)
4565 {
4566 	struct mptcp_delegated_action *delegated;
4567 	struct mptcp_subflow_context *subflow;
4568 	int work_done = 0;
4569 
4570 	delegated = container_of(napi, struct mptcp_delegated_action, napi);
4571 	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
4572 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4573 
4574 		bh_lock_sock_nested(ssk);
4575 		if (!sock_owned_by_user(ssk)) {
4576 			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
4577 		} else {
4578 			/* tcp_release_cb_override already processed
4579 			 * the action or will do at next release_sock().
4580 			 * In both case must dequeue the subflow here - on the same
4581 			 * CPU that scheduled it.
4582 			 */
4583 			smp_wmb();
4584 			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
4585 		}
4586 		bh_unlock_sock(ssk);
4587 		sock_put(ssk);
4588 
4589 		if (++work_done == budget)
4590 			return budget;
4591 	}
4592 
4593 	/* always provide a 0 'work_done' argument, so that napi_complete_done
4594 	 * will not try accessing the NULL napi->dev ptr
4595 	 */
4596 	napi_complete_done(napi, 0);
4597 	return work_done;
4598 }
4599 
4600 void __init mptcp_proto_init(void)
4601 {
4602 	struct mptcp_delegated_action *delegated;
4603 	int cpu;
4604 
4605 	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
4606 
4607 	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
4608 		panic("Failed to allocate MPTCP pcpu counter\n");
4609 
4610 	mptcp_napi_dev = alloc_netdev_dummy(0);
4611 	if (!mptcp_napi_dev)
4612 		panic("Failed to allocate MPTCP dummy netdev\n");
4613 	for_each_possible_cpu(cpu) {
4614 		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
4615 		INIT_LIST_HEAD(&delegated->head);
4616 		netif_napi_add_tx(mptcp_napi_dev, &delegated->napi,
4617 				  mptcp_napi_poll);
4618 		napi_enable(&delegated->napi);
4619 	}
4620 
4621 	mptcp_subflow_init();
4622 	mptcp_pm_init();
4623 	mptcp_sched_init();
4624 	mptcp_token_init();
4625 
4626 	if (proto_register(&mptcp_prot, 1) != 0)
4627 		panic("Failed to register MPTCP proto.\n");
4628 
4629 	inet_register_protosw(&mptcp_protosw);
4630 
4631 	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
4632 
4633 	/* struct mptcp_data_frag: 'overhead' corresponds to the alignment
4634 	 * (ALIGN(1, sizeof(long)) - 1, so 8-1) + the struct's size
4635 	 */
4636 	BUILD_BUG_ON(ALIGN(1, sizeof(long)) - 1 + sizeof(struct mptcp_data_frag)
4637 		     > U8_MAX);
4638 }
4639 
4640 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4641 static const struct proto_ops mptcp_v6_stream_ops = {
4642 	.family		   = PF_INET6,
4643 	.owner		   = THIS_MODULE,
4644 	.release	   = inet6_release,
4645 	.bind		   = mptcp_bind,
4646 	.connect	   = inet_stream_connect,
4647 	.socketpair	   = sock_no_socketpair,
4648 	.accept		   = mptcp_stream_accept,
4649 	.getname	   = inet6_getname,
4650 	.poll		   = mptcp_poll,
4651 	.ioctl		   = inet6_ioctl,
4652 	.gettstamp	   = sock_gettstamp,
4653 	.listen		   = mptcp_listen,
4654 	.shutdown	   = inet_shutdown,
4655 	.setsockopt	   = sock_common_setsockopt,
4656 	.getsockopt	   = sock_common_getsockopt,
4657 	.sendmsg	   = inet6_sendmsg,
4658 	.recvmsg	   = inet6_recvmsg,
4659 	.mmap		   = sock_no_mmap,
4660 #ifdef CONFIG_COMPAT
4661 	.compat_ioctl	   = inet6_compat_ioctl,
4662 #endif
4663 	.set_rcvlowat	   = mptcp_set_rcvlowat,
4664 	.read_sock	   = mptcp_read_sock,
4665 	.splice_read	   = mptcp_splice_read,
4666 };
4667 
4668 static struct proto mptcp_v6_prot;
4669 
4670 static struct inet_protosw mptcp_v6_protosw = {
4671 	.type		= SOCK_STREAM,
4672 	.protocol	= IPPROTO_MPTCP,
4673 	.prot		= &mptcp_v6_prot,
4674 	.ops		= &mptcp_v6_stream_ops,
4675 	.flags		= INET_PROTOSW_ICSK,
4676 };
4677 
4678 int __init mptcp_proto_v6_init(void)
4679 {
4680 	int err;
4681 
4682 	mptcp_v6_prot = mptcp_prot;
4683 	strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
4684 	mptcp_v6_prot.slab = NULL;
4685 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
4686 	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);
4687 
4688 	err = proto_register(&mptcp_v6_prot, 1);
4689 	if (err)
4690 		return err;
4691 
4692 	err = inet6_register_protosw(&mptcp_v6_protosw);
4693 	if (err)
4694 		proto_unregister(&mptcp_v6_prot);
4695 
4696 	return err;
4697 }
4698 #endif
4699