xref: /linux/net/mptcp/subflow.c (revision d6296cb65320be16dbf20f2fd584ddc25f3437cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/algapi.h>
13 #include <crypto/sha2.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #include <net/tcp.h>
19 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
20 #include <net/ip6_route.h>
21 #include <net/transp_v6.h>
22 #endif
23 #include <net/mptcp.h>
24 #include <uapi/linux/mptcp.h>
25 #include "protocol.h"
26 #include "mib.h"
27 
28 #include <trace/events/mptcp.h>
29 #include <trace/events/sock.h>
30 
31 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
32 
33 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
34 				  enum linux_mptcp_mib_field field)
35 {
36 	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
37 }
38 
39 static void subflow_req_destructor(struct request_sock *req)
40 {
41 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
42 
43 	pr_debug("subflow_req=%p", subflow_req);
44 
45 	if (subflow_req->msk)
46 		sock_put((struct sock *)subflow_req->msk);
47 
48 	mptcp_token_destroy_request(req);
49 }
50 
51 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
52 				  void *hmac)
53 {
54 	u8 msg[8];
55 
56 	put_unaligned_be32(nonce1, &msg[0]);
57 	put_unaligned_be32(nonce2, &msg[4]);
58 
59 	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
60 }
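
/* For reference, RFC 8684 (Section 3.2) defines the MP_JOIN HMACs as
 * HMAC-SHA256 over the two concatenated nonces, keyed with the two
 * concatenated 64-bit keys, the sender's key and nonce first: the SYN-ACK
 * carries only the leftmost 64 bits of the responder's HMAC (the
 * "truncated" thmac), while the third ACK carries the leftmost 160 bits
 * of the initiator's HMAC.
 */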
61 
62 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
63 {
64 	return mptcp_is_fully_established((void *)msk) &&
65 		((mptcp_pm_is_userspace(msk) &&
66 		  mptcp_userspace_pm_active(msk)) ||
67 		 READ_ONCE(msk->pm.accept_subflow));
68 }
69 
70 /* validate received token and create truncated hmac and nonce for SYN-ACK */
71 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
72 {
73 	struct mptcp_sock *msk = subflow_req->msk;
74 	u8 hmac[SHA256_DIGEST_SIZE];
75 
76 	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
77 
78 	subflow_generate_hmac(msk->local_key, msk->remote_key,
79 			      subflow_req->local_nonce,
80 			      subflow_req->remote_nonce, hmac);
81 
82 	subflow_req->thmac = get_unaligned_be64(hmac);
83 }
84 
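/* look up the mptcp socket owning the token received in an MP_JOIN SYN
 * and let the PM assign a local address id to the new subflow
 */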
85 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
86 {
87 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
88 	struct mptcp_sock *msk;
89 	int local_id;
90 
91 	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
92 	if (!msk) {
93 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
94 		return NULL;
95 	}
96 
97 	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
98 	if (local_id < 0) {
99 		sock_put((struct sock *)msk);
100 		return NULL;
101 	}
102 	subflow_req->local_id = local_id;
103 
104 	return msk;
105 }
106 
107 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
108 {
109 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
110 
111 	subflow_req->mp_capable = 0;
112 	subflow_req->mp_join = 0;
113 	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
114 	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
115 	subflow_req->msk = NULL;
116 	mptcp_token_init_request(req);
117 }
118 
119 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
120 {
121 	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
122 }
123 
124 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
125 {
126 	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
127 
128 	if (mpext) {
129 		memset(mpext, 0, sizeof(*mpext));
130 		mpext->reset_reason = reason;
131 	}
132 }
133 
134 /* Init mptcp request socket.
135  *
136  * Returns an error code if a JOIN has failed and a TCP reset
137  * should be sent.
138  */
139 static int subflow_check_req(struct request_sock *req,
140 			     const struct sock *sk_listener,
141 			     struct sk_buff *skb)
142 {
143 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
144 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
145 	struct mptcp_options_received mp_opt;
146 	bool opt_mp_capable, opt_mp_join;
147 
148 	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
149 
150 #ifdef CONFIG_TCP_MD5SIG
151 	/* no MPTCP if MD5SIG is enabled on this socket, as we may run out of
152 	 * TCP option space.
153 	 */
154 	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
155 		return -EINVAL;
156 #endif
157 
158 	mptcp_get_options(skb, &mp_opt);
159 
160 	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
161 	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
162 	if (opt_mp_capable) {
163 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
164 
165 		if (opt_mp_join)
166 			return 0;
167 	} else if (opt_mp_join) {
168 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
169 	}
170 
171 	if (opt_mp_capable && listener->request_mptcp) {
172 		int err, retries = MPTCP_TOKEN_MAX_RETRIES;
173 
174 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
175 again:
176 		do {
177 			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
178 		} while (subflow_req->local_key == 0);
179 
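		/* For reference, per RFC 8684 (Section 3.1) the token is the most
		 * significant 32 bits of SHA-256(key) and the IDSN is the least
		 * significant 64 bits; mptcp_crypto_key_sha() derives both
		 */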
180 		if (unlikely(req->syncookie)) {
181 			mptcp_crypto_key_sha(subflow_req->local_key,
182 					     &subflow_req->token,
183 					     &subflow_req->idsn);
184 			if (mptcp_token_exists(subflow_req->token)) {
185 				if (retries-- > 0)
186 					goto again;
187 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
188 			} else {
189 				subflow_req->mp_capable = 1;
190 			}
191 			return 0;
192 		}
193 
194 		err = mptcp_token_new_request(req);
195 		if (err == 0)
196 			subflow_req->mp_capable = 1;
197 		else if (retries-- > 0)
198 			goto again;
199 		else
200 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
201 
202 	} else if (opt_mp_join && listener->request_mptcp) {
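		/* per RFC 8684 (Section 3.2), an MP_JOIN SYN carries the 32-bit
		 * token of the existing connection, the initiator's random nonce,
		 * the address id and the backup flag
		 */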
203 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
204 		subflow_req->mp_join = 1;
205 		subflow_req->backup = mp_opt.backup;
206 		subflow_req->remote_id = mp_opt.join_id;
207 		subflow_req->token = mp_opt.token;
208 		subflow_req->remote_nonce = mp_opt.nonce;
209 		subflow_req->msk = subflow_token_join_request(req);
210 
211 		/* Can't fall back to TCP in this case. */
212 		if (!subflow_req->msk) {
213 			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
214 			return -EPERM;
215 		}
216 
217 		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
218 			pr_debug("syn inet_sport=%d %d",
219 				 ntohs(inet_sk(sk_listener)->inet_sport),
220 				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
221 			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
222 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
223 				return -EPERM;
224 			}
225 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
226 		}
227 
228 		subflow_req_create_thmac(subflow_req);
229 
230 		if (unlikely(req->syncookie)) {
231 			if (mptcp_can_accept_new_subflow(subflow_req->msk))
232 				subflow_init_req_cookie_join_save(subflow_req, skb);
233 			else
234 				return -EPERM;
235 		}
236 
237 		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
238 			 subflow_req->remote_nonce, subflow_req->msk);
239 	}
240 
241 	return 0;
242 }
243 
244 int mptcp_subflow_init_cookie_req(struct request_sock *req,
245 				  const struct sock *sk_listener,
246 				  struct sk_buff *skb)
247 {
248 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
249 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
250 	struct mptcp_options_received mp_opt;
251 	bool opt_mp_capable, opt_mp_join;
252 	int err;
253 
254 	subflow_init_req(req, sk_listener);
255 	mptcp_get_options(skb, &mp_opt);
256 
257 	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
258 	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
259 	if (opt_mp_capable && opt_mp_join)
260 		return -EINVAL;
261 
262 	if (opt_mp_capable && listener->request_mptcp) {
263 		if (mp_opt.sndr_key == 0)
264 			return -EINVAL;
265 
266 		subflow_req->local_key = mp_opt.rcvr_key;
267 		err = mptcp_token_new_request(req);
268 		if (err)
269 			return err;
270 
271 		subflow_req->mp_capable = 1;
272 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
273 	} else if (opt_mp_join && listener->request_mptcp) {
274 		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
275 			return -EINVAL;
276 
277 		subflow_req->mp_join = 1;
278 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
279 	}
280 
281 	return 0;
282 }
283 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
284 
285 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
286 					      struct sk_buff *skb,
287 					      struct flowi *fl,
288 					      struct request_sock *req)
289 {
290 	struct dst_entry *dst;
291 	int err;
292 
293 	tcp_rsk(req)->is_mptcp = 1;
294 	subflow_init_req(req, sk);
295 
296 	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
297 	if (!dst)
298 		return NULL;
299 
300 	err = subflow_check_req(req, sk, skb);
301 	if (err == 0)
302 		return dst;
303 
304 	dst_release(dst);
305 	if (!req->syncookie)
306 		tcp_request_sock_ops.send_reset(sk, skb);
307 	return NULL;
308 }
309 
310 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
311 				struct tcp_fastopen_cookie *foc,
312 				enum tcp_synack_type synack_type)
313 {
314 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
315 	struct inet_request_sock *ireq = inet_rsk(req);
316 
317 	/* clear tstamp_ok, as needed depending on cookie */
318 	if (foc && foc->len > -1)
319 		ireq->tstamp_ok = 0;
320 
321 	if (synack_type == TCP_SYNACK_FASTOPEN)
322 		mptcp_fastopen_subflow_synack_set_params(subflow, req);
323 }
324 
325 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
326 				  struct flowi *fl,
327 				  struct request_sock *req,
328 				  struct tcp_fastopen_cookie *foc,
329 				  enum tcp_synack_type synack_type,
330 				  struct sk_buff *syn_skb)
331 {
332 	subflow_prep_synack(sk, req, foc, synack_type);
333 
334 	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
335 						     synack_type, syn_skb);
336 }
337 
338 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
339 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
340 				  struct flowi *fl,
341 				  struct request_sock *req,
342 				  struct tcp_fastopen_cookie *foc,
343 				  enum tcp_synack_type synack_type,
344 				  struct sk_buff *syn_skb)
345 {
346 	subflow_prep_synack(sk, req, foc, synack_type);
347 
348 	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
349 						     synack_type, syn_skb);
350 }
351 
352 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
353 					      struct sk_buff *skb,
354 					      struct flowi *fl,
355 					      struct request_sock *req)
356 {
357 	struct dst_entry *dst;
358 	int err;
359 
360 	tcp_rsk(req)->is_mptcp = 1;
361 	subflow_init_req(req, sk);
362 
363 	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
364 	if (!dst)
365 		return NULL;
366 
367 	err = subflow_check_req(req, sk, skb);
368 	if (err == 0)
369 		return dst;
370 
371 	dst_release(dst);
372 	if (!req->syncookie)
373 		tcp6_request_sock_ops.send_reset(sk, skb);
374 	return NULL;
375 }
376 #endif
377 
378 /* validate received truncated hmac and create hmac for third ACK */
379 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
380 {
381 	u8 hmac[SHA256_DIGEST_SIZE];
382 	u64 thmac;
383 
384 	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
385 			      subflow->remote_nonce, subflow->local_nonce,
386 			      hmac);
387 
388 	thmac = get_unaligned_be64(hmac);
389 	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
390 		 subflow, subflow->token, thmac, subflow->thmac);
391 
392 	return thmac == subflow->thmac;
393 }
394 
395 void mptcp_subflow_reset(struct sock *ssk)
396 {
397 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
398 	struct sock *sk = subflow->conn;
399 
400 	/* must hold: tcp_done() could drop last reference on parent */
401 	sock_hold(sk);
402 
403 	tcp_set_state(ssk, TCP_CLOSE);
404 	tcp_send_active_reset(ssk, GFP_ATOMIC);
405 	tcp_done(ssk);
406 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
407 	    schedule_work(&mptcp_sk(sk)->work))
408 		return; /* worker will put sk for us */
409 
410 	sock_put(sk);
411 }
412 
413 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
414 {
415 	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
416 }
417 
418 void __mptcp_set_connected(struct sock *sk)
419 {
420 	if (sk->sk_state == TCP_SYN_SENT) {
421 		inet_sk_state_store(sk, TCP_ESTABLISHED);
422 		sk->sk_state_change(sk);
423 	}
424 }
425 
426 static void mptcp_set_connected(struct sock *sk)
427 {
428 	mptcp_data_lock(sk);
429 	if (!sock_owned_by_user(sk))
430 		__mptcp_set_connected(sk);
431 	else
432 		__set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
433 	mptcp_data_unlock(sk);
434 }
435 
436 static void subflow_set_remote_key(struct mptcp_sock *msk,
437 				   struct mptcp_subflow_context *subflow,
438 				   const struct mptcp_options_received *mp_opt)
439 {
440 	/* active MPC subflow will reach here multiple times:
441 	 * at subflow_finish_connect() time and at 4th ack time
442 	 */
443 	if (subflow->remote_key_valid)
444 		return;
445 
446 	subflow->remote_key_valid = 1;
447 	subflow->remote_key = mp_opt->sndr_key;
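	/* the peer's IDSN is the least significant 64 bits of
	 * SHA-256(remote key) (RFC 8684, Section 3.1); the msk-level
	 * ack_seq starts one past it
	 */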
448 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
449 	subflow->iasn++;
450 
451 	WRITE_ONCE(msk->remote_key, subflow->remote_key);
452 	WRITE_ONCE(msk->ack_seq, subflow->iasn);
453 	WRITE_ONCE(msk->can_ack, true);
454 	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
455 }
456 
457 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
458 {
459 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
460 	struct mptcp_options_received mp_opt;
461 	struct sock *parent = subflow->conn;
462 	struct mptcp_sock *msk;
463 
464 	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
465 
466 	/* be sure no special action on any packet other than syn-ack */
467 	if (subflow->conn_finished)
468 		return;
469 
470 	msk = mptcp_sk(parent);
471 	mptcp_propagate_sndbuf(parent, sk);
472 	subflow->rel_write_seq = 1;
473 	subflow->conn_finished = 1;
474 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
475 	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
476 
477 	mptcp_get_options(skb, &mp_opt);
478 	if (subflow->request_mptcp) {
479 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
480 			MPTCP_INC_STATS(sock_net(sk),
481 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
482 			mptcp_do_fallback(sk);
483 			pr_fallback(msk);
484 			goto fallback;
485 		}
486 
487 		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
488 			WRITE_ONCE(msk->csum_enabled, true);
489 		if (mp_opt.deny_join_id0)
490 			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
491 		subflow->mp_capable = 1;
492 		subflow_set_remote_key(msk, subflow, &mp_opt);
493 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
494 		mptcp_finish_connect(sk);
495 		mptcp_set_connected(parent);
496 	} else if (subflow->request_join) {
497 		u8 hmac[SHA256_DIGEST_SIZE];
498 
499 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
500 			subflow->reset_reason = MPTCP_RST_EMPTCP;
501 			goto do_reset;
502 		}
503 
504 		subflow->backup = mp_opt.backup;
505 		subflow->thmac = mp_opt.thmac;
506 		subflow->remote_nonce = mp_opt.nonce;
507 		subflow->remote_id = mp_opt.join_id;
508 		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
509 			 subflow, subflow->thmac, subflow->remote_nonce,
510 			 subflow->backup);
511 
512 		if (!subflow_thmac_valid(subflow)) {
513 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
514 			subflow->reset_reason = MPTCP_RST_EMPTCP;
515 			goto do_reset;
516 		}
517 
518 		if (!mptcp_finish_join(sk))
519 			goto do_reset;
520 
521 		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
522 				      subflow->local_nonce,
523 				      subflow->remote_nonce,
524 				      hmac);
525 		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
526 
527 		subflow->mp_join = 1;
528 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
529 
530 		if (subflow_use_different_dport(msk, sk)) {
531 			pr_debug("synack inet_dport=%d %d",
532 				 ntohs(inet_sk(sk)->inet_dport),
533 				 ntohs(inet_sk(parent)->inet_dport));
534 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
535 		}
536 	} else if (mptcp_check_fallback(sk)) {
537 fallback:
538 		mptcp_rcv_space_init(msk, sk);
539 		mptcp_set_connected(parent);
540 	}
541 	return;
542 
543 do_reset:
544 	subflow->reset_transient = 0;
545 	mptcp_subflow_reset(sk);
546 }
547 
548 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
549 {
550 	subflow->local_id = local_id;
551 	subflow->local_id_valid = 1;
552 }
553 
554 static int subflow_chk_local_id(struct sock *sk)
555 {
556 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
557 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
558 	int err;
559 
560 	if (likely(subflow->local_id_valid))
561 		return 0;
562 
563 	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
564 	if (err < 0)
565 		return err;
566 
567 	subflow_set_local_id(subflow, err);
568 	return 0;
569 }
570 
571 static int subflow_rebuild_header(struct sock *sk)
572 {
573 	int err = subflow_chk_local_id(sk);
574 
575 	if (unlikely(err < 0))
576 		return err;
577 
578 	return inet_sk_rebuild_header(sk);
579 }
580 
581 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
582 static int subflow_v6_rebuild_header(struct sock *sk)
583 {
584 	int err = subflow_chk_local_id(sk);
585 
586 	if (unlikely(err < 0))
587 		return err;
588 
589 	return inet6_sk_rebuild_header(sk);
590 }
591 #endif
592 
593 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
594 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
595 
596 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
597 {
598 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
599 
600 	pr_debug("subflow=%p", subflow);
601 
602 	/* Never answer SYNs sent to broadcast or multicast addresses */
603 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
604 		goto drop;
605 
606 	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
607 				&subflow_request_sock_ipv4_ops,
608 				sk, skb);
609 drop:
610 	tcp_listendrop(sk);
611 	return 0;
612 }
613 
614 static void subflow_v4_req_destructor(struct request_sock *req)
615 {
616 	subflow_req_destructor(req);
617 	tcp_request_sock_ops.destructor(req);
618 }
619 
620 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
621 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
622 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
623 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
624 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
625 static struct proto tcpv6_prot_override;
626 
627 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
628 {
629 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
630 
631 	pr_debug("subflow=%p", subflow);
632 
633 	if (skb->protocol == htons(ETH_P_IP))
634 		return subflow_v4_conn_request(sk, skb);
635 
636 	if (!ipv6_unicast_destination(skb))
637 		goto drop;
638 
639 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
640 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
641 		return 0;
642 	}
643 
644 	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
645 				&subflow_request_sock_ipv6_ops, sk, skb);
646 
647 drop:
648 	tcp_listendrop(sk);
649 	return 0; /* don't send reset */
650 }
651 
652 static void subflow_v6_req_destructor(struct request_sock *req)
653 {
654 	subflow_req_destructor(req);
655 	tcp6_request_sock_ops.destructor(req);
656 }
657 #endif
658 
659 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
660 					       struct sock *sk_listener,
661 					       bool attach_listener)
662 {
663 	if (ops->family == AF_INET)
664 		ops = &mptcp_subflow_v4_request_sock_ops;
665 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
666 	else if (ops->family == AF_INET6)
667 		ops = &mptcp_subflow_v6_request_sock_ops;
668 #endif
669 
670 	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
671 }
672 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
673 
674 /* validate hmac received in third ACK */
675 static bool subflow_hmac_valid(const struct request_sock *req,
676 			       const struct mptcp_options_received *mp_opt)
677 {
678 	const struct mptcp_subflow_request_sock *subflow_req;
679 	u8 hmac[SHA256_DIGEST_SIZE];
680 	struct mptcp_sock *msk;
681 
682 	subflow_req = mptcp_subflow_rsk(req);
683 	msk = subflow_req->msk;
684 	if (!msk)
685 		return false;
686 
687 	subflow_generate_hmac(msk->remote_key, msk->local_key,
688 			      subflow_req->remote_nonce,
689 			      subflow_req->local_nonce, hmac);
690 
691 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
692 }
693 
694 static void mptcp_force_close(struct sock *sk)
695 {
696 	/* the msk is not yet exposed to user-space */
697 	inet_sk_state_store(sk, TCP_CLOSE);
698 	sk_common_release(sk);
699 }
700 
701 static void subflow_ulp_fallback(struct sock *sk,
702 				 struct mptcp_subflow_context *old_ctx)
703 {
704 	struct inet_connection_sock *icsk = inet_csk(sk);
705 
706 	mptcp_subflow_tcp_fallback(sk, old_ctx);
707 	icsk->icsk_ulp_ops = NULL;
708 	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
709 	tcp_sk(sk)->is_mptcp = 0;
710 
711 	mptcp_subflow_ops_undo_override(sk);
712 }
713 
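/* detach and free the subflow ULP context, reverting ssk to plain TCP */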
714 static void subflow_drop_ctx(struct sock *ssk)
715 {
716 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
717 
718 	if (!ctx)
719 		return;
720 
721 	subflow_ulp_fallback(ssk, ctx);
722 	if (ctx->conn)
723 		sock_put(ctx->conn);
724 
725 	kfree_rcu(ctx, rcu);
726 }
727 
728 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
729 				     const struct mptcp_options_received *mp_opt)
730 {
731 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
732 
733 	subflow_set_remote_key(msk, subflow, mp_opt);
734 	subflow->fully_established = 1;
735 	WRITE_ONCE(msk->fully_established, true);
736 
737 	if (subflow->is_mptfo)
738 		mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
739 }
740 
741 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
742 					  struct sk_buff *skb,
743 					  struct request_sock *req,
744 					  struct dst_entry *dst,
745 					  struct request_sock *req_unhash,
746 					  bool *own_req)
747 {
748 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
749 	struct mptcp_subflow_request_sock *subflow_req;
750 	struct mptcp_options_received mp_opt;
751 	bool fallback, fallback_is_fatal;
752 	struct sock *new_msk = NULL;
753 	struct sock *child;
754 
755 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
756 
757 	/* After child creation we must look for MPC even when options
758 	 * are not parsed
759 	 */
760 	mp_opt.suboptions = 0;
761 
762 	/* hopefully temporary handling for MP_JOIN+syncookie */
763 	subflow_req = mptcp_subflow_rsk(req);
764 	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
765 	fallback = !tcp_rsk(req)->is_mptcp;
766 	if (fallback)
767 		goto create_child;
768 
769 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
770 	if (subflow_req->mp_capable) {
771 		/* we can receive and accept an in-window, out-of-order pkt,
772 		 * which may not carry the MP_CAPABLE opt even on MPTCP-enabled
773 		 * paths: always try to extract the peer key, and fall back
774 		 * for packets missing it.
775 		 * Even OoO DSS packets coming legitimately after dropped or
776 		 * reordered MPC will cause a fallback, but we don't have other
777 		 * options.
778 		 */
779 		mptcp_get_options(skb, &mp_opt);
780 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
781 			fallback = true;
782 			goto create_child;
783 		}
784 
785 		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
786 		if (!new_msk)
787 			fallback = true;
788 	} else if (subflow_req->mp_join) {
789 		mptcp_get_options(skb, &mp_opt);
790 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
791 		    !subflow_hmac_valid(req, &mp_opt) ||
792 		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
793 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
794 			fallback = true;
795 		}
796 	}
797 
798 create_child:
799 	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
800 						     req_unhash, own_req);
801 
802 	if (child && *own_req) {
803 		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
804 
805 		tcp_rsk(req)->drop_req = false;
806 
807 		/* we need to fall back on ctx allocation failure and on the
808 		 * prerequisite checks above. In the latter scenario we additionally
809 		 * need to reset the context to non-MPTCP status.
810 		 */
811 		if (!ctx || fallback) {
812 			if (fallback_is_fatal) {
813 				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
814 				goto dispose_child;
815 			}
816 
817 			if (new_msk)
818 				mptcp_copy_inaddrs(new_msk, child);
819 			subflow_drop_ctx(child);
820 			goto out;
821 		}
822 
823 		/* ssk inherits options of listener sk */
824 		ctx->setsockopt_seq = listener->setsockopt_seq;
825 
826 		if (ctx->mp_capable) {
827 			/* this can't race with mptcp_close(), as the msk is
828 			 * not yet exposed to user-space
829 			 */
830 			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
831 
832 			/* record the newly created socket as the first msk
833 			 * subflow, but don't link it yet into conn_list
834 			 */
835 			WRITE_ONCE(mptcp_sk(new_msk)->first, child);
836 
837 			/* new mpc subflow takes ownership of the newly
838 			 * created mptcp socket
839 			 */
840 			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
841 			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
842 			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
843 			ctx->conn = new_msk;
844 			new_msk = NULL;
845 
846 			/* set msk addresses early to ensure mptcp_pm_get_local_id()
847 			 * uses the correct data
848 			 */
849 			mptcp_copy_inaddrs(ctx->conn, child);
850 
851 			/* with OoO packets we can reach here without an ingress
852 			 * MPC option
853 			 */
854 			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
855 				mptcp_subflow_fully_established(ctx, &mp_opt);
856 		} else if (ctx->mp_join) {
857 			struct mptcp_sock *owner;
858 
859 			owner = subflow_req->msk;
860 			if (!owner) {
861 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
862 				goto dispose_child;
863 			}
864 
865 			/* move the msk reference ownership to the subflow */
866 			subflow_req->msk = NULL;
867 			ctx->conn = (struct sock *)owner;
868 
869 			if (subflow_use_different_sport(owner, sk)) {
870 				pr_debug("ack inet_sport=%d %d",
871 					 ntohs(inet_sk(sk)->inet_sport),
872 					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
873 				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
874 					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
875 					goto dispose_child;
876 				}
877 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
878 			}
879 
880 			if (!mptcp_finish_join(child))
881 				goto dispose_child;
882 
883 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
884 			tcp_rsk(req)->drop_req = true;
885 		}
886 	}
887 
888 out:
889 	/* dispose of the leftover mptcp master, if any */
890 	if (unlikely(new_msk))
891 		mptcp_force_close(new_msk);
892 
893 	/* check for expected invariant - should never trigger, just helps
894 	 * catching subtle bugs earlier
895 	 */
896 	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
897 		     (!mptcp_subflow_ctx(child) ||
898 		      !mptcp_subflow_ctx(child)->conn));
899 	return child;
900 
901 dispose_child:
902 	subflow_drop_ctx(child);
903 	tcp_rsk(req)->drop_req = true;
904 	inet_csk_prepare_for_destroy_sock(child);
905 	tcp_done(child);
906 	req->rsk_ops->send_reset(sk, skb);
907 
908 	/* The last child reference will be released by the caller */
909 	return child;
910 }
911 
912 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
913 static struct proto tcp_prot_override;
914 
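/* outcome of the DSS mapping validation on the ssk receive queue: OK and
 * DATA_FIN provide a usable mapping, EMPTY means more data is needed,
 * INVALID and BAD_CSUM lead to either a fallback to plain TCP or a subflow
 * reset, while DUMMY marks a subflow that has already fallen back; see
 * subflow_check_data_avail() for the handling
 */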
915 enum mapping_status {
916 	MAPPING_OK,
917 	MAPPING_INVALID,
918 	MAPPING_EMPTY,
919 	MAPPING_DATA_FIN,
920 	MAPPING_DUMMY,
921 	MAPPING_BAD_CSUM
922 };
923 
924 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
925 {
926 	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
927 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
928 }
929 
930 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
931 {
932 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
933 	unsigned int skb_consumed;
934 
935 	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
936 	if (WARN_ON_ONCE(skb_consumed >= skb->len))
937 		return true;
938 
939 	return skb->len - skb_consumed <= subflow->map_data_len -
940 					  mptcp_subflow_get_map_offset(subflow);
941 }
942 
943 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
944 {
945 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
946 	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
947 
948 	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
949 		/* Mapping covers data later in the subflow stream,
950 		 * currently unsupported.
951 		 */
952 		dbg_bad_map(subflow, ssn);
953 		return false;
954 	}
955 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
956 				  subflow->map_data_len))) {
957 		/* Mapping covers only past subflow data, invalid */
958 		dbg_bad_map(subflow, ssn);
959 		return false;
960 	}
961 	return true;
962 }
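
/* e.g. with ssn_offset 1000 and a mapping with map_subflow_seq 1 and
 * map_data_len 100, relative ssns 1..100 (subflow seqs 1001..1100) pass the
 * checks above, while anything outside that window is flagged as a bad map
 */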
963 
964 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
965 					      bool csum_reqd)
966 {
967 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
968 	u32 offset, seq, delta;
969 	__sum16 csum;
970 	int len;
971 
972 	if (!csum_reqd)
973 		return MAPPING_OK;
974 
975 	/* mapping already validated on previous traversal */
976 	if (subflow->map_csum_len == subflow->map_data_len)
977 		return MAPPING_OK;
978 
979 	/* traverse the receive queue, ensuring it contains a full
980 	 * DSS mapping and accumulating the related csum.
981 	 * Preserve the accumulated csum across multiple calls, to compute
982 	 * the csum only once
983 	 */
984 	delta = subflow->map_data_len - subflow->map_csum_len;
985 	for (;;) {
986 		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
987 		offset = seq - TCP_SKB_CB(skb)->seq;
988 
989 		/* if the current skb has not been accounted yet, csum its contents
990 		 * up to the amount covered by the current DSS
991 		 */
992 		if (offset < skb->len) {
993 			__wsum csum;
994 
995 			len = min(skb->len - offset, delta);
996 			csum = skb_checksum(skb, offset, len, 0);
997 			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
998 								subflow->map_csum_len);
999 
1000 			delta -= len;
1001 			subflow->map_csum_len += len;
1002 		}
1003 		if (delta == 0)
1004 			break;
1005 
1006 		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
1007 			/* if this subflow is closed, the partial mapping
1008 			 * will never be completed; flush the pending skbs, so
1009 			 * that subflow_sched_work_if_closed() can kick in
1010 			 */
1011 			if (unlikely(ssk->sk_state == TCP_CLOSE))
1012 				while ((skb = skb_peek(&ssk->sk_receive_queue)))
1013 					sk_eat_skb(ssk, skb);
1014 
1015 			/* not enough data to validate the csum */
1016 			return MAPPING_EMPTY;
1017 		}
1018 
1019 		/* the DSS mapping for next skbs will be validated later,
1020 		 * when a later get_mapping_status() call processes such skb
1021 		 */
1022 		skb = skb->next;
1023 	}
1024 
1025 	/* note that 'map_data_len' accounts only for the carried data and does
1026 	 * not include the sequence-space increment due to an eventual DATA_FIN,
1027 	 * while the pseudo-header requires the original DSS data length,
1028 	 * including it
1029 	 */
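	/* per RFC 8684 (Section 3.3.1) the checksum is the standard TCP
	 * checksum computed over the DSS pseudo-header - data sequence
	 * number, subflow sequence number, data-level length and a zeroed
	 * checksum field - followed by the payload
	 */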
1030 	csum = __mptcp_make_csum(subflow->map_seq,
1031 				 subflow->map_subflow_seq,
1032 				 subflow->map_data_len + subflow->map_data_fin,
1033 				 subflow->map_data_csum);
1034 	if (unlikely(csum)) {
1035 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
1036 		return MAPPING_BAD_CSUM;
1037 	}
1038 
1039 	subflow->valid_csum_seen = 1;
1040 	return MAPPING_OK;
1041 }
1042 
1043 static enum mapping_status get_mapping_status(struct sock *ssk,
1044 					      struct mptcp_sock *msk)
1045 {
1046 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1047 	bool csum_reqd = READ_ONCE(msk->csum_enabled);
1048 	struct mptcp_ext *mpext;
1049 	struct sk_buff *skb;
1050 	u16 data_len;
1051 	u64 map_seq;
1052 
1053 	skb = skb_peek(&ssk->sk_receive_queue);
1054 	if (!skb)
1055 		return MAPPING_EMPTY;
1056 
1057 	if (mptcp_check_fallback(ssk))
1058 		return MAPPING_DUMMY;
1059 
1060 	mpext = mptcp_get_ext(skb);
1061 	if (!mpext || !mpext->use_map) {
1062 		if (!subflow->map_valid && !skb->len) {
1063 			/* the TCP stack delivers 0-len FIN pkts to the receive
1064 			 * queue; those are the only 0-len pkts ever expected here,
1065 			 * and we can admit a missing mapping only for such pkts
1066 			 */
1067 			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1068 				WARN_ONCE(1, "0len seq %d:%d flags %x",
1069 					  TCP_SKB_CB(skb)->seq,
1070 					  TCP_SKB_CB(skb)->end_seq,
1071 					  TCP_SKB_CB(skb)->tcp_flags);
1072 			sk_eat_skb(ssk, skb);
1073 			return MAPPING_EMPTY;
1074 		}
1075 
1076 		if (!subflow->map_valid)
1077 			return MAPPING_INVALID;
1078 
1079 		goto validate_seq;
1080 	}
1081 
1082 	trace_get_mapping_status(mpext);
1083 
1084 	data_len = mpext->data_len;
1085 	if (data_len == 0) {
1086 		pr_debug("infinite mapping received");
1087 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1088 		subflow->map_data_len = 0;
1089 		return MAPPING_INVALID;
1090 	}
1091 
1092 	if (mpext->data_fin == 1) {
1093 		if (data_len == 1) {
1094 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1095 								 mpext->dsn64);
1096 			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
1097 			if (subflow->map_valid) {
1098 				/* A DATA_FIN might arrive in a DSS
1099 				 * option before the previous mapping
1100 				 * has been fully consumed. Continue
1101 				 * handling the existing mapping.
1102 				 */
1103 				skb_ext_del(skb, SKB_EXT_MPTCP);
1104 				return MAPPING_OK;
1105 			} else {
1106 				if (updated && schedule_work(&msk->work))
1107 					sock_hold((struct sock *)msk);
1108 
1109 				return MAPPING_DATA_FIN;
1110 			}
1111 		} else {
1112 			u64 data_fin_seq = mpext->data_seq + data_len - 1;
1113 
1114 			/* If mpext->data_seq is a 32-bit value, data_fin_seq
1115 			 * must also be limited to 32 bits.
1116 			 */
1117 			if (!mpext->dsn64)
1118 				data_fin_seq &= GENMASK_ULL(31, 0);
1119 
1120 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1121 			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
1122 				 data_fin_seq, mpext->dsn64);
1123 		}
1124 
1125 		/* Adjust for DATA_FIN using 1 byte of sequence space */
1126 		data_len--;
1127 	}
1128 
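	/* a 32-bit DSS sequence number is expanded to the 64-bit value
	 * closest to the current msk-level ack_seq, see mptcp_expand_seq()
	 */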
1129 	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
1130 	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
1131 
1132 	if (subflow->map_valid) {
1133 		/* Allow replacing only with an identical map */
1134 		if (subflow->map_seq == map_seq &&
1135 		    subflow->map_subflow_seq == mpext->subflow_seq &&
1136 		    subflow->map_data_len == data_len &&
1137 		    subflow->map_csum_reqd == mpext->csum_reqd) {
1138 			skb_ext_del(skb, SKB_EXT_MPTCP);
1139 			goto validate_csum;
1140 		}
1141 
1142 		/* If this skb's data is fully covered by the current mapping,
1143 		 * the new map would need caching, which is not supported
1144 		 */
1145 		if (skb_is_fully_mapped(ssk, skb)) {
1146 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1147 			return MAPPING_INVALID;
1148 		}
1149 
1150 		/* will validate the next map after consuming the current one */
1151 		goto validate_csum;
1152 	}
1153 
1154 	subflow->map_seq = map_seq;
1155 	subflow->map_subflow_seq = mpext->subflow_seq;
1156 	subflow->map_data_len = data_len;
1157 	subflow->map_valid = 1;
1158 	subflow->map_data_fin = mpext->data_fin;
1159 	subflow->mpc_map = mpext->mpc_map;
1160 	subflow->map_csum_reqd = mpext->csum_reqd;
1161 	subflow->map_csum_len = 0;
1162 	subflow->map_data_csum = csum_unfold(mpext->csum);
1163 
1164 	/* Cf. RFC 8684, Section 3.3 */
1165 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
1166 		return MAPPING_INVALID;
1167 
1168 	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
1169 		 subflow->map_seq, subflow->map_subflow_seq,
1170 		 subflow->map_data_len, subflow->map_csum_reqd,
1171 		 subflow->map_data_csum);
1172 
1173 validate_seq:
1174 	/* we revalidate a valid mapping on each new skb, because we must
1175 	 * ensure the current skb is completely covered by it
1176 	 */
1177 	if (!validate_mapping(ssk, skb)) {
1178 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1179 		return MAPPING_INVALID;
1180 	}
1181 
1182 	skb_ext_del(skb, SKB_EXT_MPTCP);
1183 
1184 validate_csum:
1185 	return validate_data_csum(ssk, skb, csum_reqd);
1186 }
1187 
1188 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1189 				       u64 limit)
1190 {
1191 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1192 	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
1193 	u32 incr;
1194 
1195 	incr = limit >= skb->len ? skb->len + fin : limit;
1196 
1197 	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
1198 		 subflow->map_subflow_seq);
1199 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1200 	tcp_sk(ssk)->copied_seq += incr;
1201 	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1202 		sk_eat_skb(ssk, skb);
1203 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
1204 		subflow->map_valid = 0;
1205 }
1206 
1207 /* sched mptcp worker to remove the subflow if no more data is pending */
1208 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1209 {
1210 	struct sock *sk = (struct sock *)msk;
1211 
1212 	if (likely(ssk->sk_state != TCP_CLOSE))
1213 		return;
1214 
1215 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
1216 	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
1217 		sock_hold(sk);
1218 		if (!schedule_work(&msk->work))
1219 			sock_put(sk);
1220 	}
1221 }
1222 
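/* a subflow may silently fall back to plain TCP only before the MPTCP
 * handshake completes; MP_JOIN subflows never can, and when checksums are
 * in use a fallback is also forbidden once a valid DSS checksum has been
 * observed on the subflow
 */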
1223 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
1224 {
1225 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1226 
1227 	if (subflow->mp_join)
1228 		return false;
1229 	else if (READ_ONCE(msk->csum_enabled))
1230 		return !subflow->valid_csum_seen;
1231 	else
1232 		return !subflow->fully_established;
1233 }
1234 
1235 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1236 {
1237 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1238 	unsigned long fail_tout;
1239 
1240 	/* graceful failure can happen only on the MPC subflow */
1241 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1242 		return;
1243 
1244 	/* since the close timeout takes precedence over the fail one,
1245 	 * there is no need to start the latter when the former is already set
1246 	 */
1247 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
1248 		return;
1249 
1250 	/* we don't need extreme accuracy here; a zero fail_tout is a special
1251 	 * value meaning no fail timeout at all
1252 	 */
1253 	fail_tout = jiffies + TCP_RTO_MAX;
1254 	if (!fail_tout)
1255 		fail_tout = 1;
1256 	WRITE_ONCE(subflow->fail_tout, fail_tout);
1257 	tcp_send_ack(ssk);
1258 
1259 	mptcp_reset_timeout(msk, subflow->fail_tout);
1260 }
1261 
1262 static bool subflow_check_data_avail(struct sock *ssk)
1263 {
1264 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1265 	enum mapping_status status;
1266 	struct mptcp_sock *msk;
1267 	struct sk_buff *skb;
1268 
1269 	if (!skb_peek(&ssk->sk_receive_queue))
1270 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1271 	if (subflow->data_avail)
1272 		return true;
1273 
1274 	msk = mptcp_sk(subflow->conn);
1275 	for (;;) {
1276 		u64 ack_seq;
1277 		u64 old_ack;
1278 
1279 		status = get_mapping_status(ssk, msk);
1280 		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1281 		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
1282 			     status == MAPPING_BAD_CSUM))
1283 			goto fallback;
1284 
1285 		if (status != MAPPING_OK)
1286 			goto no_data;
1287 
1288 		skb = skb_peek(&ssk->sk_receive_queue);
1289 		if (WARN_ON_ONCE(!skb))
1290 			goto no_data;
1291 
1292 		if (unlikely(!READ_ONCE(msk->can_ack)))
1293 			goto fallback;
1294 
1295 		old_ack = READ_ONCE(msk->ack_seq);
1296 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
1297 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1298 			 ack_seq);
1299 		if (unlikely(before64(ack_seq, old_ack))) {
1300 			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1301 			continue;
1302 		}
1303 
1304 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1305 		break;
1306 	}
1307 	return true;
1308 
1309 no_data:
1310 	subflow_sched_work_if_closed(msk, ssk);
1311 	return false;
1312 
1313 fallback:
1314 	if (!__mptcp_check_fallback(msk)) {
1315 		/* RFC 8684 section 3.7. */
1316 		if (status == MAPPING_BAD_CSUM &&
1317 		    (subflow->mp_join || subflow->valid_csum_seen)) {
1318 			subflow->send_mp_fail = 1;
1319 
1320 			if (!READ_ONCE(msk->allow_infinite_fallback)) {
1321 				subflow->reset_transient = 0;
1322 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
1323 				goto reset;
1324 			}
1325 			mptcp_subflow_fail(msk, ssk);
1326 			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1327 			return true;
1328 		}
1329 
1330 		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
1331 			/* fatal protocol error, close the socket.
1332 			 * subflow_error_report() will introduce the appropriate barriers
1333 			 */
1334 			subflow->reset_transient = 0;
1335 			subflow->reset_reason = MPTCP_RST_EMPTCP;
1336 
1337 reset:
1338 			ssk->sk_err = EBADMSG;
1339 			tcp_set_state(ssk, TCP_CLOSE);
1340 			while ((skb = skb_peek(&ssk->sk_receive_queue)))
1341 				sk_eat_skb(ssk, skb);
1342 			tcp_send_active_reset(ssk, GFP_ATOMIC);
1343 			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1344 			return false;
1345 		}
1346 
1347 		mptcp_do_fallback(ssk);
1348 	}
1349 
1350 	skb = skb_peek(&ssk->sk_receive_queue);
1351 	subflow->map_valid = 1;
1352 	subflow->map_seq = READ_ONCE(msk->ack_seq);
1353 	subflow->map_data_len = skb->len;
1354 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1355 	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1356 	return true;
1357 }
1358 
1359 bool mptcp_subflow_data_available(struct sock *sk)
1360 {
1361 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1362 
1363 	/* check if current mapping is still valid */
1364 	if (subflow->map_valid &&
1365 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1366 		subflow->map_valid = 0;
1367 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1368 
1369 		pr_debug("Done with mapping: seq=%u data_len=%u",
1370 			 subflow->map_subflow_seq,
1371 			 subflow->map_data_len);
1372 	}
1373 
1374 	return subflow_check_data_avail(sk);
1375 }
1376 
1377 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1378  * not the ssk one.
1379  *
1380  * In mptcp, rwin is about the mptcp-level connection data.
1381  *
1382  * Data that is still on the ssk rx queue can thus be ignored,
1383  * as far as the mptcp peer is concerned, that data is still in flight.
1384  * DSS ACK is updated when skb is moved to the mptcp rx queue.
1385  */
1386 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1387 {
1388 	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1389 	const struct sock *sk = subflow->conn;
1390 
1391 	*space = __mptcp_space(sk);
1392 	*full_space = tcp_full_space(sk);
1393 }
1394 
1395 void __mptcp_error_report(struct sock *sk)
1396 {
1397 	struct mptcp_subflow_context *subflow;
1398 	struct mptcp_sock *msk = mptcp_sk(sk);
1399 
1400 	mptcp_for_each_subflow(msk, subflow) {
1401 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1402 		int err = sock_error(ssk);
1403 		int ssk_state;
1404 
1405 		if (!err)
1406 			continue;
1407 
1408 		/* only propagate errors on fallen-back sockets or
1409 		 * on MPC connect
1410 		 */
1411 		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
1412 			continue;
1413 
1414 		/* We need to propagate only transition to CLOSE state.
1415 		 * Orphaned socket will see such state change via
1416 		 * subflow_sched_work_if_closed() and that path will properly
1417 		 * destroy the msk as needed.
1418 		 */
1419 		ssk_state = inet_sk_state_load(ssk);
1420 		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
1421 			inet_sk_state_store(sk, ssk_state);
1422 		sk->sk_err = -err;
1423 
1424 		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
1425 		smp_wmb();
1426 		sk_error_report(sk);
1427 		break;
1428 	}
1429 }
1430 
1431 static void subflow_error_report(struct sock *ssk)
1432 {
1433 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1434 
1435 	mptcp_data_lock(sk);
1436 	if (!sock_owned_by_user(sk))
1437 		__mptcp_error_report(sk);
1438 	else
1439 		__set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->cb_flags);
1440 	mptcp_data_unlock(sk);
1441 }
1442 
1443 static void subflow_data_ready(struct sock *sk)
1444 {
1445 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1446 	u16 state = 1 << inet_sk_state_load(sk);
1447 	struct sock *parent = subflow->conn;
1448 	struct mptcp_sock *msk;
1449 
1450 	trace_sk_data_ready(sk);
1451 
1452 	msk = mptcp_sk(parent);
1453 	if (state & TCPF_LISTEN) {
1454 		/* MPJ subflows are removed from the accept queue before reaching here,
1455 		 * avoid stray wakeups
1456 		 */
1457 		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1458 			return;
1459 
1460 		parent->sk_data_ready(parent);
1461 		return;
1462 	}
1463 
1464 	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1465 		     !subflow->mp_join && !(state & TCPF_CLOSE));
1466 
1467 	if (mptcp_subflow_data_available(sk))
1468 		mptcp_data_ready(parent, sk);
1469 	else if (unlikely(sk->sk_err))
1470 		subflow_error_report(sk);
1471 }
1472 
1473 static void subflow_write_space(struct sock *ssk)
1474 {
1475 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1476 
1477 	mptcp_propagate_sndbuf(sk, ssk);
1478 	mptcp_write_space(sk);
1479 }
1480 
1481 static const struct inet_connection_sock_af_ops *
1482 subflow_default_af_ops(struct sock *sk)
1483 {
1484 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1485 	if (sk->sk_family == AF_INET6)
1486 		return &subflow_v6_specific;
1487 #endif
1488 	return &subflow_specific;
1489 }
1490 
1491 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1492 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1493 {
1494 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1495 	struct inet_connection_sock *icsk = inet_csk(sk);
1496 	const struct inet_connection_sock_af_ops *target;
1497 
1498 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1499 
1500 	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1501 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1502 
1503 	if (likely(icsk->icsk_af_ops == target))
1504 		return;
1505 
1506 	subflow->icsk_af_ops = icsk->icsk_af_ops;
1507 	icsk->icsk_af_ops = target;
1508 }
1509 #endif
1510 
1511 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1512 			 struct sockaddr_storage *addr,
1513 			 unsigned short family)
1514 {
1515 	memset(addr, 0, sizeof(*addr));
1516 	addr->ss_family = family;
1517 	if (addr->ss_family == AF_INET) {
1518 		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1519 
1520 		if (info->family == AF_INET)
1521 			in_addr->sin_addr = info->addr;
1522 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1523 		else if (ipv6_addr_v4mapped(&info->addr6))
1524 			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1525 #endif
1526 		in_addr->sin_port = info->port;
1527 	}
1528 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1529 	else if (addr->ss_family == AF_INET6) {
1530 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1531 
1532 		if (info->family == AF_INET)
1533 			ipv6_addr_set_v4mapped(info->addr.s_addr,
1534 					       &in6_addr->sin6_addr);
1535 		else
1536 			in6_addr->sin6_addr = info->addr6;
1537 		in6_addr->sin6_port = info->port;
1538 	}
1539 #endif
1540 }
1541 
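/* create and connect an additional subflow towards the given remote address:
 * bind a new TCP socket to 'loc', copy the msk keys and token into its
 * context, flag it as a join request and issue a non-blocking connect; the
 * MP_JOIN handshake is then completed via subflow_finish_connect()
 */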
1542 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1543 			    const struct mptcp_addr_info *remote)
1544 {
1545 	struct mptcp_sock *msk = mptcp_sk(sk);
1546 	struct mptcp_subflow_context *subflow;
1547 	struct sockaddr_storage addr;
1548 	int remote_id = remote->id;
1549 	int local_id = loc->id;
1550 	int err = -ENOTCONN;
1551 	struct socket *sf;
1552 	struct sock *ssk;
1553 	u32 remote_token;
1554 	int addrlen;
1555 	int ifindex;
1556 	u8 flags;
1557 
1558 	if (!mptcp_is_fully_established(sk))
1559 		goto err_out;
1560 
1561 	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
1562 	if (err)
1563 		goto err_out;
1564 
1565 	ssk = sf->sk;
1566 	subflow = mptcp_subflow_ctx(ssk);
1567 	do {
1568 		get_random_bytes(&subflow->local_nonce, sizeof(u32));
1569 	} while (!subflow->local_nonce);
1570 
1571 	if (local_id)
1572 		subflow_set_local_id(subflow, local_id);
1573 
1574 	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1575 					     &flags, &ifindex);
1576 	subflow->remote_key_valid = 1;
1577 	subflow->remote_key = msk->remote_key;
1578 	subflow->local_key = msk->local_key;
1579 	subflow->token = msk->token;
1580 	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1581 
1582 	addrlen = sizeof(struct sockaddr_in);
1583 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1584 	if (addr.ss_family == AF_INET6)
1585 		addrlen = sizeof(struct sockaddr_in6);
1586 #endif
1587 	mptcp_sockopt_sync(msk, ssk);
1588 
1589 	ssk->sk_bound_dev_if = ifindex;
1590 	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1591 	if (err)
1592 		goto failed;
1593 
1594 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1595 	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1596 		 remote_token, local_id, remote_id);
1597 	subflow->remote_token = remote_token;
1598 	subflow->remote_id = remote_id;
1599 	subflow->request_join = 1;
1600 	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1601 	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1602 
1603 	sock_hold(ssk);
1604 	list_add_tail(&subflow->node, &msk->conn_list);
1605 	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1606 	if (err && err != -EINPROGRESS)
1607 		goto failed_unlink;
1608 
1609 	/* discard the subflow socket */
1610 	mptcp_sock_graft(ssk, sk->sk_socket);
1611 	iput(SOCK_INODE(sf));
1612 	WRITE_ONCE(msk->allow_infinite_fallback, false);
1613 	return 0;
1614 
1615 failed_unlink:
1616 	list_del(&subflow->node);
1617 	sock_put(mptcp_subflow_tcp_sock(subflow));
1618 
1619 failed:
1620 	subflow->disposable = 1;
1621 	sock_release(sf);
1622 
1623 err_out:
1624 	/* we account subflows before creation, and these failures will not
1625 	 * be caught by sk_state_change()
1626 	 */
1627 	mptcp_pm_close_subflow(msk);
1628 	return err;
1629 }
1630 
1631 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1632 {
1633 #ifdef CONFIG_SOCK_CGROUP_DATA
1634 	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1635 				*child_skcd = &child->sk_cgrp_data;
1636 
1637 	/* only the additional subflows created by kworkers have to be modified */
1638 	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1639 	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
1640 #ifdef CONFIG_MEMCG
1641 		struct mem_cgroup *memcg = parent->sk_memcg;
1642 
1643 		mem_cgroup_sk_free(child);
1644 		if (memcg && css_tryget(&memcg->css))
1645 			child->sk_memcg = memcg;
1646 #endif /* CONFIG_MEMCG */
1647 
1648 		cgroup_sk_free(child_skcd);
1649 		*child_skcd = *parent_skcd;
1650 		cgroup_sk_clone(child_skcd);
1651 	}
1652 #endif /* CONFIG_SOCK_CGROUP_DATA */
1653 }
1654 
1655 static void mptcp_subflow_ops_override(struct sock *ssk)
1656 {
1657 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1658 	if (ssk->sk_prot == &tcpv6_prot)
1659 		ssk->sk_prot = &tcpv6_prot_override;
1660 	else
1661 #endif
1662 		ssk->sk_prot = &tcp_prot_override;
1663 }
1664 
1665 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1666 {
1667 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1668 	if (ssk->sk_prot == &tcpv6_prot_override)
1669 		ssk->sk_prot = &tcpv6_prot;
1670 	else
1671 #endif
1672 		ssk->sk_prot = &tcp_prot;
1673 }
1674 
1675 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
1676 				struct socket **new_sock)
1677 {
1678 	struct mptcp_subflow_context *subflow;
1679 	struct net *net = sock_net(sk);
1680 	struct socket *sf;
1681 	int err;
1682 
1683 	/* un-accepted server sockets can reach here - on bad configuration
1684 	 * bail early to avoid greater trouble later
1685 	 */
1686 	if (unlikely(!sk->sk_socket))
1687 		return -EINVAL;
1688 
1689 	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
1690 	if (err)
1691 		return err;
1692 
1693 	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
1694 
1695 	/* the newly created socket has to be in the same cgroup as its parent */
1696 	mptcp_attach_cgroup(sk, sf->sk);
1697 
1698 	/* kernel sockets do not by default acquire net ref, but TCP timer
1699 	 * needs it.
1700 	 * Update ns_tracker to current stack trace and refcounted tracker.
1701 	 */
1702 	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
1703 	sf->sk->sk_net_refcnt = 1;
1704 	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1705 	sock_inuse_add(net, 1);
1706 	err = tcp_set_ulp(sf->sk, "mptcp");
1707 	release_sock(sf->sk);
1708 
1709 	if (err) {
1710 		sock_release(sf);
1711 		return err;
1712 	}
1713 
1714 	/* the newly created socket really belongs to the owning MPTCP master
1715 	 * socket, even if for additional subflows the allocation is performed
1716 	 * by a kernel workqueue. Adjust inode references, so that the
1717 	 * procfs/diag interfaces really show this one belonging to the correct
1718 	 * user.
1719 	 */
1720 	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1721 	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1722 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1723 
1724 	subflow = mptcp_subflow_ctx(sf->sk);
1725 	pr_debug("subflow=%p", subflow);
1726 
1727 	*new_sock = sf;
1728 	sock_hold(sk);
1729 	subflow->conn = sk;
1730 	mptcp_subflow_ops_override(sf->sk);
1731 
1732 	return 0;
1733 }
1734 
1735 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1736 							gfp_t priority)
1737 {
1738 	struct inet_connection_sock *icsk = inet_csk(sk);
1739 	struct mptcp_subflow_context *ctx;
1740 
1741 	ctx = kzalloc(sizeof(*ctx), priority);
1742 	if (!ctx)
1743 		return NULL;
1744 
1745 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1746 	INIT_LIST_HEAD(&ctx->node);
1747 	INIT_LIST_HEAD(&ctx->delegated_node);
1748 
1749 	pr_debug("subflow=%p", ctx);
1750 
1751 	ctx->tcp_sock = sk;
1752 
1753 	return ctx;
1754 }
1755 
1756 static void __subflow_state_change(struct sock *sk)
1757 {
1758 	struct socket_wq *wq;
1759 
1760 	rcu_read_lock();
1761 	wq = rcu_dereference(sk->sk_wq);
1762 	if (skwq_has_sleeper(wq))
1763 		wake_up_interruptible_all(&wq->wait);
1764 	rcu_read_unlock();
1765 }
1766 
1767 static bool subflow_is_done(const struct sock *sk)
1768 {
1769 	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1770 }
1771 
1772 static void subflow_state_change(struct sock *sk)
1773 {
1774 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1775 	struct sock *parent = subflow->conn;
1776 
1777 	__subflow_state_change(sk);
1778 
1779 	if (subflow_simultaneous_connect(sk)) {
1780 		mptcp_propagate_sndbuf(parent, sk);
1781 		mptcp_do_fallback(sk);
1782 		mptcp_rcv_space_init(mptcp_sk(parent), sk);
1783 		pr_fallback(mptcp_sk(parent));
1784 		subflow->conn_finished = 1;
1785 		mptcp_set_connected(parent);
1786 	}
1787 
1788 	/* as recvmsg() does not acquire the subflow socket for ssk selection,
1789 	 * a FIN packet carrying a DSS can go unnoticed if we don't trigger
1790 	 * the data-available machinery here.
1791 	 */
1792 	if (mptcp_subflow_data_available(sk))
1793 		mptcp_data_ready(parent, sk);
1794 	else if (unlikely(sk->sk_err))
1795 		subflow_error_report(sk);
1796 
1797 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1798 
1799 	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
1800 	    !subflow->rx_eof && subflow_is_done(sk)) {
1801 		subflow->rx_eof = 1;
1802 		mptcp_subflow_eof(parent);
1803 	}
1804 }
1805 
1806 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
1807 {
1808 	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
1809 	struct mptcp_sock *msk, *next, *head = NULL;
1810 	struct request_sock *req;
1811 
1812 	/* build a list of all unaccepted mptcp sockets */
1813 	spin_lock_bh(&queue->rskq_lock);
1814 	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
1815 		struct mptcp_subflow_context *subflow;
1816 		struct sock *ssk = req->sk;
1817 		struct mptcp_sock *msk;
1818 
1819 		if (!sk_is_mptcp(ssk))
1820 			continue;
1821 
1822 		subflow = mptcp_subflow_ctx(ssk);
1823 		if (!subflow || !subflow->conn)
1824 			continue;
1825 
1826 		/* skip if already in list */
1827 		msk = mptcp_sk(subflow->conn);
1828 		if (msk->dl_next || msk == head)
1829 			continue;
1830 
1831 		msk->dl_next = head;
1832 		head = msk;
1833 	}
1834 	spin_unlock_bh(&queue->rskq_lock);
1835 	if (!head)
1836 		return;
1837 
1838 	/* we can't acquire the msk socket lock under the subflow one,
1839 	 * or it will cause an ABBA deadlock
1840 	 */
1841 	release_sock(listener_ssk);
1842 
1843 	for (msk = head; msk; msk = next) {
1844 		struct sock *sk = (struct sock *)msk;
1845 		bool do_cancel_work;
1846 
1847 		sock_hold(sk);
1848 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1849 		next = msk->dl_next;
1850 		msk->first = NULL;
1851 		msk->dl_next = NULL;
1852 
1853 		do_cancel_work = __mptcp_close(sk, 0);
1854 		release_sock(sk);
1855 		if (do_cancel_work) {
1856 			/* lockdep will report a false positive ABBA deadlock
1857 			 * between cancel_work_sync() and the listener socket.
1858 			 * The involved locks belong to different sockets WRT
1859 			 * the existing AB chain.
1860 			 * Using a per-socket key is problematic: key
1861 			 * deregistration requires process context, but it
1862 			 * would have to happen at socket disposal time, in
1863 			 * atomic context.
1864 			 * Just tell lockdep to consider the listener socket
1865 			 * released here.
1866 			 */
1867 			mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
1868 			mptcp_cancel_work(sk);
1869 			mutex_acquire(&listener_sk->sk_lock.dep_map,
1870 				      SINGLE_DEPTH_NESTING, 0, _RET_IP_);
1871 		}
1872 		sock_put(sk);
1873 	}
1874 
1875 	/* we are still under the listener msk socket lock */
1876 	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
1877 }
1878 
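/* Sketch of the lockdep-only release trick used above, for two locks
 * A (the listener socket) and B (the msk work), with A really held
 * across the whole sequence:
 *
 *	mutex_release(&A->dep_map, _RET_IP_);
 *	cancel_work_sync(&work);
 *	mutex_acquire(&A->dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
 *
 * Only lockdep's bookkeeping changes: A is never really unlocked, so
 * the false positive A->B vs B->A report is silenced without altering
 * the actual locking.
 */
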
1879 static int subflow_ulp_init(struct sock *sk)
1880 {
1881 	struct inet_connection_sock *icsk = inet_csk(sk);
1882 	struct mptcp_subflow_context *ctx;
1883 	struct tcp_sock *tp = tcp_sk(sk);
1884 	int err = 0;
1885 
1886 	/* disallow attaching ULP to a socket unless it has been
1887 	 * created with sock_create_kern()
1888 	 */
1889 	if (!sk->sk_kern_sock) {
1890 		err = -EOPNOTSUPP;
1891 		goto out;
1892 	}
1893 
1894 	ctx = subflow_create_ctx(sk, GFP_KERNEL);
1895 	if (!ctx) {
1896 		err = -ENOMEM;
1897 		goto out;
1898 	}
1899 
1900 	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1901 
1902 	tp->is_mptcp = 1;
1903 	ctx->icsk_af_ops = icsk->icsk_af_ops;
1904 	icsk->icsk_af_ops = subflow_default_af_ops(sk);
1905 	ctx->tcp_state_change = sk->sk_state_change;
1906 	ctx->tcp_error_report = sk->sk_error_report;
1907 
1908 	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
1909 	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
1910 
1911 	sk->sk_data_ready = subflow_data_ready;
1912 	sk->sk_write_space = subflow_write_space;
1913 	sk->sk_state_change = subflow_state_change;
1914 	sk->sk_error_report = subflow_error_report;
1915 out:
1916 	return err;
1917 }
1918 
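/* subflow_ulp_init() is the ->init() hook of the "mptcp" ULP, invoked
 * whenever the ULP is attached to a TCP socket. Due to the
 * sk_kern_sock check, only in-kernel attach succeeds, e.g.:
 *
 *	err = tcp_set_ulp(sf->sk, "mptcp");
 *
 * as done from mptcp_subflow_create_socket() above; a userspace
 * setsockopt(fd, IPPROTO_TCP, TCP_ULP, "mptcp", strlen("mptcp"))
 * reaches the same hook but is rejected with -EOPNOTSUPP.
 */
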
1919 static void subflow_ulp_release(struct sock *ssk)
1920 {
1921 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1922 	bool release = true;
1923 	struct sock *sk;
1924 
1925 	if (!ctx)
1926 		return;
1927 
1928 	sk = ctx->conn;
1929 	if (sk) {
1930 		/* if the msk has been orphaned while the subflow is
1931 		 * still unaccepted, keep the ctx alive; it will be
1932 		 * freed by __mptcp_close_ssk()
1933 		 */
1934 		release = ctx->disposable || list_empty(&ctx->node);
1935 		sock_put(sk);
1936 	}
1937 
1938 	mptcp_subflow_ops_undo_override(ssk);
1939 	if (release)
1940 		kfree_rcu(ctx, rcu);
1941 }
1942 
1943 static void subflow_ulp_clone(const struct request_sock *req,
1944 			      struct sock *newsk,
1945 			      const gfp_t priority)
1946 {
1947 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1948 	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1949 	struct mptcp_subflow_context *new_ctx;
1950 
1951 	if (!tcp_rsk(req)->is_mptcp ||
1952 	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1953 		subflow_ulp_fallback(newsk, old_ctx);
1954 		return;
1955 	}
1956 
1957 	new_ctx = subflow_create_ctx(newsk, priority);
1958 	if (!new_ctx) {
1959 		subflow_ulp_fallback(newsk, old_ctx);
1960 		return;
1961 	}
1962 
1963 	new_ctx->conn_finished = 1;
1964 	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1965 	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1966 	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
1967 	new_ctx->rel_write_seq = 1;
1968 	new_ctx->tcp_sock = newsk;
1969 
1970 	if (subflow_req->mp_capable) {
1971 		/* see comments in subflow_syn_recv_sock(): the MPTCP connection
1972 		 * is fully established only after we receive the remote key
1973 		 */
1974 		new_ctx->mp_capable = 1;
1975 		new_ctx->local_key = subflow_req->local_key;
1976 		new_ctx->token = subflow_req->token;
1977 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1978 		new_ctx->idsn = subflow_req->idsn;
1979 
1980 		/* this is the first subflow, id is always 0 */
1981 		new_ctx->local_id_valid = 1;
1982 	} else if (subflow_req->mp_join) {
1983 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1984 		new_ctx->mp_join = 1;
1985 		new_ctx->fully_established = 1;
1986 		new_ctx->remote_key_valid = 1;
1987 		new_ctx->backup = subflow_req->backup;
1988 		new_ctx->remote_id = subflow_req->remote_id;
1989 		new_ctx->token = subflow_req->token;
1990 		new_ctx->thmac = subflow_req->thmac;
1991 
1992 		/* the subflow req id is valid, fetched via subflow_check_req()
1993 		 * and subflow_token_join_request()
1994 		 */
1995 		subflow_set_local_id(new_ctx, subflow_req->local_id);
1996 	}
1997 }
1998 
1999 static void tcp_release_cb_override(struct sock *ssk)
2000 {
2001 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
2002 
2003 	if (mptcp_subflow_has_delegated_action(subflow))
2004 		mptcp_subflow_process_delegated(ssk);
2005 
2006 	tcp_release_cb(ssk);
2007 }
2008 
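/* The ->release_cb override is what makes subflow delegation work:
 * another context can queue an action on a subflow whose socket lock
 * is owned elsewhere, and the action runs when the owner releases the
 * lock. A sketch of the scheduling side, using this kernel's
 * MPTCP_DELEGATE_SEND action:
 *
 *	mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SEND);
 *
 * A later release_sock(ssk) by the lock owner then lands in
 * tcp_release_cb_override(), which processes the delegated action
 * before the plain TCP release work.
 */
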
2009 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
2010 	.name		= "mptcp",
2011 	.owner		= THIS_MODULE,
2012 	.init		= subflow_ulp_init,
2013 	.release	= subflow_ulp_release,
2014 	.clone		= subflow_ulp_clone,
2015 };
2016 
2017 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
2018 {
2019 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
2020 
2021 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
2022 					      subflow_ops->obj_size, 0,
2023 					      SLAB_ACCOUNT |
2024 					      SLAB_TYPESAFE_BY_RCU,
2025 					      NULL);
2026 	if (!subflow_ops->slab)
2027 		return -ENOMEM;
2028 
2029 	return 0;
2030 }
2031 
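/* SLAB_TYPESAFE_BY_RCU is weaker than kfree_rcu(): a freed request
 * sock may be recycled for a new object of the same type before an RCU
 * grace period has elapsed. Lockless lookups must therefore take a
 * reference and then re-validate the identity, along the lines of this
 * sketch (lookup() and req_matches() are hypothetical):
 *
 *	rcu_read_lock();
 *	req = lookup(key);
 *	if (req && !refcount_inc_not_zero(&req->rsk_refcnt))
 *		req = NULL;
 *	if (req && !req_matches(req, key)) {
 *		reqsk_put(req);
 *		req = NULL;
 *	}
 *	rcu_read_unlock();
 */
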
2032 void __init mptcp_subflow_init(void)
2033 {
2034 	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
2035 	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
2036 	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
2037 
2038 	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
2039 		panic("MPTCP: failed to init subflow v4 request sock ops\n");
2040 
2041 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
2042 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
2043 	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
2044 
2045 	subflow_specific = ipv4_specific;
2046 	subflow_specific.conn_request = subflow_v4_conn_request;
2047 	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
2048 	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
2049 	subflow_specific.rebuild_header = subflow_rebuild_header;
2050 
2051 	tcp_prot_override = tcp_prot;
2052 	tcp_prot_override.release_cb = tcp_release_cb_override;
2053 
2054 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2055 	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
2056 	 * structures for v4 and v6 have the same size. This should not change
2057 	 * in the future, but better make sure we are warned if it is no longer
2058 	 * the case.
2059 	 */
2060 	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
2061 
2062 	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
2063 	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
2064 	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
2065 
2066 	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
2067 		panic("MPTCP: failed to init subflow v6 request sock ops\n");
2068 
2069 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
2070 	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
2071 	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
2072 
2073 	subflow_v6_specific = ipv6_specific;
2074 	subflow_v6_specific.conn_request = subflow_v6_conn_request;
2075 	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
2076 	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
2077 	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
2078 
2079 	subflow_v6m_specific = subflow_v6_specific;
2080 	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
2081 	subflow_v6m_specific.send_check = ipv4_specific.send_check;
2082 	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
2083 	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
2084 	subflow_v6m_specific.net_frag_header_len = 0;
2085 	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
2086 
2087 	tcpv6_prot_override = tcpv6_prot;
2088 	tcpv6_prot_override.release_cb = tcp_release_cb_override;
2089 #endif
2090 
2091 	mptcp_diag_subflow_init(&subflow_ulp_ops);
2092 
2093 	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
2094 		panic("MPTCP: failed to register subflows to ULP\n");
2095 }
2096