xref: /linux/net/mptcp/subflow.c (revision 704fd176204577459beadb37d46e164d376fabc3)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/algapi.h>
13 #include <crypto/sha2.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #include <net/tcp.h>
19 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
20 #include <net/ip6_route.h>
21 #include <net/transp_v6.h>
22 #endif
23 #include <net/mptcp.h>
24 #include <uapi/linux/mptcp.h>
25 #include "protocol.h"
26 #include "mib.h"
27 
28 #include <trace/events/mptcp.h>
29 
30 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
31 
32 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
33 				  enum linux_mptcp_mib_field field)
34 {
35 	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
36 }
37 
38 static void subflow_req_destructor(struct request_sock *req)
39 {
40 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
41 
42 	pr_debug("subflow_req=%p", subflow_req);
43 
44 	if (subflow_req->msk)
45 		sock_put((struct sock *)subflow_req->msk);
46 
47 	mptcp_token_destroy_request(req);
48 	tcp_request_sock_ops.destructor(req);
49 }
50 
51 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
52 				  void *hmac)
53 {
54 	u8 msg[8];
55 
56 	put_unaligned_be32(nonce1, &msg[0]);
57 	put_unaligned_be32(nonce2, &msg[4]);
58 
59 	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
60 }
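/* Per RFC 8684, the MP_JOIN HMAC is HMAC-SHA256 keyed with the two
 * concatenated 64-bit keys (key1 || key2) over the two concatenated
 * 32-bit nonces (nonce1 || nonce2), all in network byte order. The
 * SYN/ACK carries only the leftmost 64 bits of the digest (the truncated
 * HMAC, see subflow_req_create_thmac() below), while the third ACK
 * carries an MPTCPOPT_HMAC_LEN bytes long truncation.
 */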
61 
62 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
63 {
64 	return mptcp_is_fully_established((void *)msk) &&
65 	       READ_ONCE(msk->pm.accept_subflow);
66 }
67 
68 /* validate received token and create truncated hmac and nonce for SYN-ACK */
69 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
70 {
71 	struct mptcp_sock *msk = subflow_req->msk;
72 	u8 hmac[SHA256_DIGEST_SIZE];
73 
74 	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
75 
76 	subflow_generate_hmac(msk->local_key, msk->remote_key,
77 			      subflow_req->local_nonce,
78 			      subflow_req->remote_nonce, hmac);
79 
80 	subflow_req->thmac = get_unaligned_be64(hmac);
81 }
82 
83 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
84 {
85 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
86 	struct mptcp_sock *msk;
87 	int local_id;
88 
89 	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
90 	if (!msk) {
91 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
92 		return NULL;
93 	}
94 
95 	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
96 	if (local_id < 0) {
97 		sock_put((struct sock *)msk);
98 		return NULL;
99 	}
100 	subflow_req->local_id = local_id;
101 
102 	return msk;
103 }
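/* The 32-bit token is not random: per RFC 8684 it is the most significant
 * 32 bits of the SHA-256 hash of the key (see mptcp_crypto_key_sha()), so
 * both peers can derive it independently. mptcp_token_get_sock() uses it
 * to demux the MP_JOIN SYN to the owning msk; on success the returned msk
 * carries a reference that the caller must release.
 */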
104 
105 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
106 {
107 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
108 
109 	subflow_req->mp_capable = 0;
110 	subflow_req->mp_join = 0;
111 	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
112 	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
113 	subflow_req->msk = NULL;
114 	mptcp_token_init_request(req);
115 }
116 
117 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
118 {
119 	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
120 }
121 
122 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
123 {
124 	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
125 
126 	if (mpext) {
127 		memset(mpext, 0, sizeof(*mpext));
128 		mpext->reset_reason = reason;
129 	}
130 }
131 
132 /* Init mptcp request socket.
133  *
134  * Returns an error code if a JOIN has failed and a TCP reset
135  * should be sent.
136  */
137 static int subflow_check_req(struct request_sock *req,
138 			     const struct sock *sk_listener,
139 			     struct sk_buff *skb)
140 {
141 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
142 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
143 	struct mptcp_options_received mp_opt;
144 	bool opt_mp_capable, opt_mp_join;
145 
146 	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
147 
148 #ifdef CONFIG_TCP_MD5SIG
149 	/* no MPTCP if MD5SIG is enabled on this socket, as we may run out of
150 	 * TCP option space.
151 	 */
152 	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
153 		return -EINVAL;
154 #endif
155 
156 	mptcp_get_options(skb, &mp_opt);
157 
158 	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
159 	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
160 	if (opt_mp_capable) {
161 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
162 
163 		if (opt_mp_join)
164 			return 0;
165 	} else if (opt_mp_join) {
166 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
167 	}
168 
169 	if (opt_mp_capable && listener->request_mptcp) {
170 		int err, retries = MPTCP_TOKEN_MAX_RETRIES;
171 
172 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
173 again:
174 		do {
175 			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
176 		} while (subflow_req->local_key == 0);
177 
178 		if (unlikely(req->syncookie)) {
179 			mptcp_crypto_key_sha(subflow_req->local_key,
180 					     &subflow_req->token,
181 					     &subflow_req->idsn);
182 			if (mptcp_token_exists(subflow_req->token)) {
183 				if (retries-- > 0)
184 					goto again;
185 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
186 			} else {
187 				subflow_req->mp_capable = 1;
188 			}
189 			return 0;
190 		}
191 
192 		err = mptcp_token_new_request(req);
193 		if (err == 0)
194 			subflow_req->mp_capable = 1;
195 		else if (retries-- > 0)
196 			goto again;
197 		else
198 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
199 
200 	} else if (opt_mp_join && listener->request_mptcp) {
201 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
202 		subflow_req->mp_join = 1;
203 		subflow_req->backup = mp_opt.backup;
204 		subflow_req->remote_id = mp_opt.join_id;
205 		subflow_req->token = mp_opt.token;
206 		subflow_req->remote_nonce = mp_opt.nonce;
207 		subflow_req->msk = subflow_token_join_request(req);
208 
209 		/* Can't fall back to TCP in this case. */
210 		if (!subflow_req->msk) {
211 			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
212 			return -EPERM;
213 		}
214 
215 		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
216 			pr_debug("syn inet_sport=%d %d",
217 				 ntohs(inet_sk(sk_listener)->inet_sport),
218 				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
219 			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
220 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
221 				return -EPERM;
222 			}
223 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
224 		}
225 
226 		subflow_req_create_thmac(subflow_req);
227 
228 		if (unlikely(req->syncookie)) {
229 			if (mptcp_can_accept_new_subflow(subflow_req->msk))
230 				subflow_init_req_cookie_join_save(subflow_req, skb);
231 			else
232 				return -EPERM;
233 		}
234 
235 		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
236 			 subflow_req->remote_nonce, subflow_req->msk);
237 	}
238 
239 	return 0;
240 }
241 
242 int mptcp_subflow_init_cookie_req(struct request_sock *req,
243 				  const struct sock *sk_listener,
244 				  struct sk_buff *skb)
245 {
246 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
247 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
248 	struct mptcp_options_received mp_opt;
249 	bool opt_mp_capable, opt_mp_join;
250 	int err;
251 
252 	subflow_init_req(req, sk_listener);
253 	mptcp_get_options(skb, &mp_opt);
254 
255 	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
256 	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
257 	if (opt_mp_capable && opt_mp_join)
258 		return -EINVAL;
259 
260 	if (opt_mp_capable && listener->request_mptcp) {
261 		if (mp_opt.sndr_key == 0)
262 			return -EINVAL;
263 
264 		subflow_req->local_key = mp_opt.rcvr_key;
265 		err = mptcp_token_new_request(req);
266 		if (err)
267 			return err;
268 
269 		subflow_req->mp_capable = 1;
270 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
271 	} else if (opt_mp_join && listener->request_mptcp) {
272 		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
273 			return -EINVAL;
274 
275 		subflow_req->mp_join = 1;
276 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
277 	}
278 
279 	return 0;
280 }
281 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
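/* With syncookies no request socket survives the handshake, so the MPC/MPJ
 * state above is rebuilt from the options echoed in the final ACK;
 * ssn_offset is seq - 1 because, unlike the SYN paths above, the skb
 * processed here is the third ACK, one past the SYN's sequence number.
 */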
282 
283 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
284 					      struct sk_buff *skb,
285 					      struct flowi *fl,
286 					      struct request_sock *req)
287 {
288 	struct dst_entry *dst;
289 	int err;
290 
291 	tcp_rsk(req)->is_mptcp = 1;
292 	subflow_init_req(req, sk);
293 
294 	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
295 	if (!dst)
296 		return NULL;
297 
298 	err = subflow_check_req(req, sk, skb);
299 	if (err == 0)
300 		return dst;
301 
302 	dst_release(dst);
303 	if (!req->syncookie)
304 		tcp_request_sock_ops.send_reset(sk, skb);
305 	return NULL;
306 }
307 
308 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
309 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
310 					      struct sk_buff *skb,
311 					      struct flowi *fl,
312 					      struct request_sock *req)
313 {
314 	struct dst_entry *dst;
315 	int err;
316 
317 	tcp_rsk(req)->is_mptcp = 1;
318 	subflow_init_req(req, sk);
319 
320 	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
321 	if (!dst)
322 		return NULL;
323 
324 	err = subflow_check_req(req, sk, skb);
325 	if (err == 0)
326 		return dst;
327 
328 	dst_release(dst);
329 	if (!req->syncookie)
330 		tcp6_request_sock_ops.send_reset(sk, skb);
331 	return NULL;
332 }
333 #endif
334 
335 /* validate received truncated hmac and create hmac for third ACK */
336 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
337 {
338 	u8 hmac[SHA256_DIGEST_SIZE];
339 	u64 thmac;
340 
341 	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
342 			      subflow->remote_nonce, subflow->local_nonce,
343 			      hmac);
344 
345 	thmac = get_unaligned_be64(hmac);
346 	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
347 		 subflow, subflow->token, thmac, subflow->thmac);
348 
349 	return thmac == subflow->thmac;
350 }
351 
352 void mptcp_subflow_reset(struct sock *ssk)
353 {
354 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
355 	struct sock *sk = subflow->conn;
356 
357 	/* must hold: tcp_done() could drop last reference on parent */
358 	sock_hold(sk);
359 
360 	tcp_set_state(ssk, TCP_CLOSE);
361 	tcp_send_active_reset(ssk, GFP_ATOMIC);
362 	tcp_done(ssk);
363 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
364 	    schedule_work(&mptcp_sk(sk)->work))
365 		return; /* worker will put sk for us */
366 
367 	sock_put(sk);
368 }
369 
370 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
371 {
372 	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
373 }
374 
375 void __mptcp_set_connected(struct sock *sk)
376 {
377 	if (sk->sk_state == TCP_SYN_SENT) {
378 		inet_sk_state_store(sk, TCP_ESTABLISHED);
379 		sk->sk_state_change(sk);
380 	}
381 }
382 
383 static void mptcp_set_connected(struct sock *sk)
384 {
385 	mptcp_data_lock(sk);
386 	if (!sock_owned_by_user(sk))
387 		__mptcp_set_connected(sk);
388 	else
389 		__set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
390 	mptcp_data_unlock(sk);
391 }
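/* This lock/owned pattern recurs below (see subflow_error_report()): when
 * the msk socket is owned by user context, the state transition cannot run
 * immediately, so it is recorded in msk->cb_flags and replayed from
 * mptcp_release_cb() once the owner drops the socket lock.
 */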
392 
393 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
394 {
395 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
396 	struct mptcp_options_received mp_opt;
397 	struct sock *parent = subflow->conn;
398 
399 	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
400 
401 	/* be sure no special action on any packet other than syn-ack */
402 	if (subflow->conn_finished)
403 		return;
404 
405 	mptcp_propagate_sndbuf(parent, sk);
406 	subflow->rel_write_seq = 1;
407 	subflow->conn_finished = 1;
408 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
409 	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
410 
411 	mptcp_get_options(skb, &mp_opt);
412 	if (subflow->request_mptcp) {
413 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
414 			MPTCP_INC_STATS(sock_net(sk),
415 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
416 			mptcp_do_fallback(sk);
417 			pr_fallback(mptcp_sk(subflow->conn));
418 			goto fallback;
419 		}
420 
421 		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
422 			WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
423 		if (mp_opt.deny_join_id0)
424 			WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
425 		subflow->mp_capable = 1;
426 		subflow->can_ack = 1;
427 		subflow->remote_key = mp_opt.sndr_key;
428 		pr_debug("subflow=%p, remote_key=%llu", subflow,
429 			 subflow->remote_key);
430 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
431 		mptcp_finish_connect(sk);
432 		mptcp_set_connected(parent);
433 	} else if (subflow->request_join) {
434 		u8 hmac[SHA256_DIGEST_SIZE];
435 
436 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
437 			subflow->reset_reason = MPTCP_RST_EMPTCP;
438 			goto do_reset;
439 		}
440 
441 		subflow->backup = mp_opt.backup;
442 		subflow->thmac = mp_opt.thmac;
443 		subflow->remote_nonce = mp_opt.nonce;
444 		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
445 			 subflow, subflow->thmac, subflow->remote_nonce,
446 			 subflow->backup);
447 
448 		if (!subflow_thmac_valid(subflow)) {
449 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
450 			subflow->reset_reason = MPTCP_RST_EMPTCP;
451 			goto do_reset;
452 		}
453 
454 		if (!mptcp_finish_join(sk))
455 			goto do_reset;
456 
457 		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
458 				      subflow->local_nonce,
459 				      subflow->remote_nonce,
460 				      hmac);
461 		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
462 
463 		subflow->mp_join = 1;
464 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
465 
466 		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
467 			pr_debug("synack inet_dport=%d %d",
468 				 ntohs(inet_sk(sk)->inet_dport),
469 				 ntohs(inet_sk(parent)->inet_dport));
470 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
471 		}
472 	} else if (mptcp_check_fallback(sk)) {
473 fallback:
474 		mptcp_rcv_space_init(mptcp_sk(parent), sk);
475 		mptcp_set_connected(parent);
476 	}
477 	return;
478 
479 do_reset:
480 	subflow->reset_transient = 0;
481 	mptcp_subflow_reset(sk);
482 }
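/* Client-side MP_JOIN recap: the SYN carried our token and local_nonce,
 * the SYN/ACK processed above carried the peer's truncated HMAC (thmac)
 * and its nonce, and the HMAC computed here over both nonces travels in
 * the third ACK, where the peer validates it in subflow_hmac_valid().
 */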
483 
484 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
485 {
486 	subflow->local_id = local_id;
487 	subflow->local_id_valid = 1;
488 }
489 
490 static int subflow_chk_local_id(struct sock *sk)
491 {
492 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
493 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
494 	int err;
495 
496 	if (likely(subflow->local_id_valid))
497 		return 0;
498 
499 	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
500 	if (err < 0)
501 		return err;
502 
503 	subflow_set_local_id(subflow, err);
504 	return 0;
505 }
506 
507 static int subflow_rebuild_header(struct sock *sk)
508 {
509 	int err = subflow_chk_local_id(sk);
510 
511 	if (unlikely(err < 0))
512 		return err;
513 
514 	return inet_sk_rebuild_header(sk);
515 }
516 
517 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
518 static int subflow_v6_rebuild_header(struct sock *sk)
519 {
520 	int err = subflow_chk_local_id(sk);
521 
522 	if (unlikely(err < 0))
523 		return err;
524 
525 	return inet6_sk_rebuild_header(sk);
526 }
527 #endif
528 
529 struct request_sock_ops mptcp_subflow_request_sock_ops;
530 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
531 
532 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
533 {
534 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
535 
536 	pr_debug("subflow=%p", subflow);
537 
538 	/* Never answer SYNs sent to broadcast or multicast */
539 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
540 		goto drop;
541 
542 	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
543 				&subflow_request_sock_ipv4_ops,
544 				sk, skb);
545 drop:
546 	tcp_listendrop(sk);
547 	return 0;
548 }
549 
550 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
551 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
552 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
553 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
554 static struct proto tcpv6_prot_override;
555 
556 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
557 {
558 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
559 
560 	pr_debug("subflow=%p", subflow);
561 
562 	if (skb->protocol == htons(ETH_P_IP))
563 		return subflow_v4_conn_request(sk, skb);
564 
565 	if (!ipv6_unicast_destination(skb))
566 		goto drop;
567 
568 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
569 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
570 		return 0;
571 	}
572 
573 	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
574 				&subflow_request_sock_ipv6_ops, sk, skb);
575 
576 drop:
577 	tcp_listendrop(sk);
578 	return 0; /* don't send reset */
579 }
580 #endif
581 
582 /* validate hmac received in third ACK */
583 static bool subflow_hmac_valid(const struct request_sock *req,
584 			       const struct mptcp_options_received *mp_opt)
585 {
586 	const struct mptcp_subflow_request_sock *subflow_req;
587 	u8 hmac[SHA256_DIGEST_SIZE];
588 	struct mptcp_sock *msk;
589 
590 	subflow_req = mptcp_subflow_rsk(req);
591 	msk = subflow_req->msk;
592 	if (!msk)
593 		return false;
594 
595 	subflow_generate_hmac(msk->remote_key, msk->local_key,
596 			      subflow_req->remote_nonce,
597 			      subflow_req->local_nonce, hmac);
598 
599 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
600 }
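/* crypto_memneq() rather than memcmp(): the comparison runs in constant
 * time, so an attacker probing MP_JOIN ACKs cannot learn the expected
 * HMAC one byte at a time through timing differences.
 */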
601 
602 static void mptcp_sock_destruct(struct sock *sk)
603 {
604 	/* if the new mptcp socket isn't accepted, it is freed
605 	 * from the tcp listener socket's request queue, linked
606 	 * from req->sk.  The tcp socket is released.
607 	 * This calls the ULP release function which will
608 	 * also remove the mptcp socket, via
609 	 * sock_put(ctx->conn).
610 	 *
611 	 * Problem is that the mptcp socket will be in
612 	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
613 	 * Both result in warnings from inet_sock_destruct.
614 	 */
615 	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
616 		sk->sk_state = TCP_CLOSE;
617 		WARN_ON_ONCE(sk->sk_socket);
618 		sock_orphan(sk);
619 	}
620 
621 	mptcp_destroy_common(mptcp_sk(sk));
622 	inet_sock_destruct(sk);
623 }
624 
625 static void mptcp_force_close(struct sock *sk)
626 {
627 	/* the msk is not yet exposed to user-space */
628 	inet_sk_state_store(sk, TCP_CLOSE);
629 	sk_common_release(sk);
630 }
631 
632 static void subflow_ulp_fallback(struct sock *sk,
633 				 struct mptcp_subflow_context *old_ctx)
634 {
635 	struct inet_connection_sock *icsk = inet_csk(sk);
636 
637 	mptcp_subflow_tcp_fallback(sk, old_ctx);
638 	icsk->icsk_ulp_ops = NULL;
639 	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
640 	tcp_sk(sk)->is_mptcp = 0;
641 
642 	mptcp_subflow_ops_undo_override(sk);
643 }
644 
645 static void subflow_drop_ctx(struct sock *ssk)
646 {
647 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
648 
649 	if (!ctx)
650 		return;
651 
652 	subflow_ulp_fallback(ssk, ctx);
653 	if (ctx->conn)
654 		sock_put(ctx->conn);
655 
656 	kfree_rcu(ctx, rcu);
657 }
658 
659 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
660 				     struct mptcp_options_received *mp_opt)
661 {
662 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
663 
664 	subflow->remote_key = mp_opt->sndr_key;
665 	subflow->fully_established = 1;
666 	subflow->can_ack = 1;
667 	WRITE_ONCE(msk->fully_established, true);
668 }
669 
670 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
671 					  struct sk_buff *skb,
672 					  struct request_sock *req,
673 					  struct dst_entry *dst,
674 					  struct request_sock *req_unhash,
675 					  bool *own_req)
676 {
677 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
678 	struct mptcp_subflow_request_sock *subflow_req;
679 	struct mptcp_options_received mp_opt;
680 	bool fallback, fallback_is_fatal;
681 	struct sock *new_msk = NULL;
682 	struct sock *child;
683 
684 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
685 
686 	/* After child creation we must look for MPC even when options
687 	 * are not parsed
688 	 */
689 	mp_opt.suboptions = 0;
690 
691 	/* hopefully temporary handling for MP_JOIN+syncookie */
692 	subflow_req = mptcp_subflow_rsk(req);
693 	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
694 	fallback = !tcp_rsk(req)->is_mptcp;
695 	if (fallback)
696 		goto create_child;
697 
698 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
699 	if (subflow_req->mp_capable) {
700 		/* we can receive and accept an in-window, out-of-order pkt,
701 		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
702 		 * paths: always try to extract the peer key, and fallback
703 		 * paths: always try to extract the peer key, and fall back
704 		 * for packets missing it.
705 		 * Even OoO DSS packets coming legitimately after dropped or
706 		 * options.
707 		 */
708 		mptcp_get_options(skb, &mp_opt);
709 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
710 			fallback = true;
711 			goto create_child;
712 		}
713 
714 		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
715 		if (!new_msk)
716 			fallback = true;
717 	} else if (subflow_req->mp_join) {
718 		mptcp_get_options(skb, &mp_opt);
719 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
720 		    !subflow_hmac_valid(req, &mp_opt) ||
721 		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
722 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
723 			fallback = true;
724 		}
725 	}
726 
727 create_child:
728 	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
729 						     req_unhash, own_req);
730 
731 	if (child && *own_req) {
732 		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
733 
734 		tcp_rsk(req)->drop_req = false;
735 
736 		/* we need to fall back on ctx allocation failure and on the
737 		 * pre-reqs checks above. In the latter scenario we additionally
738 		 * need to reset the context to non-MPTCP status.
739 		 */
740 		if (!ctx || fallback) {
741 			if (fallback_is_fatal) {
742 				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
743 				goto dispose_child;
744 			}
745 
746 			subflow_drop_ctx(child);
747 			goto out;
748 		}
749 
750 		/* ssk inherits options of listener sk */
751 		ctx->setsockopt_seq = listener->setsockopt_seq;
752 
753 		if (ctx->mp_capable) {
754 			/* this can't race with mptcp_close(), as the msk is
755 			 * not yet exposed to user-space
756 			 */
757 			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
758 
759 			/* record the newly created socket as the first msk
760 			 * subflow, but don't link it yet into conn_list
761 			 */
762 			WRITE_ONCE(mptcp_sk(new_msk)->first, child);
763 
764 			/* new mpc subflow takes ownership of the newly
765 			 * created mptcp socket
766 			 */
767 			new_msk->sk_destruct = mptcp_sock_destruct;
768 			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
769 			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
770 			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
771 			ctx->conn = new_msk;
772 			new_msk = NULL;
773 
774 			/* with OoO packets we can reach here without ingress
775 			 * mpc option
776 			 */
777 			if (mp_opt.suboptions & OPTIONS_MPTCP_MPC)
778 				mptcp_subflow_fully_established(ctx, &mp_opt);
779 		} else if (ctx->mp_join) {
780 			struct mptcp_sock *owner;
781 
782 			owner = subflow_req->msk;
783 			if (!owner) {
784 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
785 				goto dispose_child;
786 			}
787 
788 			/* move the msk reference ownership to the subflow */
789 			subflow_req->msk = NULL;
790 			ctx->conn = (struct sock *)owner;
791 
792 			if (subflow_use_different_sport(owner, sk)) {
793 				pr_debug("ack inet_sport=%d %d",
794 					 ntohs(inet_sk(sk)->inet_sport),
795 					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
796 				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
797 					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
798 					goto dispose_child;
799 				}
800 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
801 			}
802 
803 			if (!mptcp_finish_join(child))
804 				goto dispose_child;
805 
806 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
807 			tcp_rsk(req)->drop_req = true;
808 		}
809 	}
810 
811 out:
812 	/* dispose of the left over mptcp master, if any */
813 	if (unlikely(new_msk))
814 		mptcp_force_close(new_msk);
815 
816 	/* check for expected invariant - should never trigger, just help
817 	 * catching earlier subtle bugs
818 	 */
819 	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
820 		     (!mptcp_subflow_ctx(child) ||
821 		      !mptcp_subflow_ctx(child)->conn));
822 	return child;
823 
824 dispose_child:
825 	subflow_drop_ctx(child);
826 	tcp_rsk(req)->drop_req = true;
827 	inet_csk_prepare_for_destroy_sock(child);
828 	tcp_done(child);
829 	req->rsk_ops->send_reset(sk, skb);
830 
831 	/* The last child reference will be released by the caller */
832 	return child;
833 }
834 
835 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
836 static struct proto tcp_prot_override;
837 
838 enum mapping_status {
839 	MAPPING_OK,
840 	MAPPING_INVALID,
841 	MAPPING_EMPTY,
842 	MAPPING_DATA_FIN,
843 	MAPPING_DUMMY
844 };
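/* Roughly: MAPPING_OK - a valid DSS mapping covers the skb at the head of
 * the receive queue; MAPPING_INVALID - protocol violation, reset or fall
 * back; MAPPING_EMPTY - nothing to process yet; MAPPING_DATA_FIN - a
 * DATA_FIN without payload; MAPPING_DUMMY - the subflow fell back to
 * plain TCP, data is passed through without a real mapping.
 */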
845 
846 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
847 {
848 	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
849 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
850 }
851 
852 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
853 {
854 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
855 	unsigned int skb_consumed;
856 
857 	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
858 	if (WARN_ON_ONCE(skb_consumed >= skb->len))
859 		return true;
860 
861 	return skb->len - skb_consumed <= subflow->map_data_len -
862 					  mptcp_subflow_get_map_offset(subflow);
863 }
864 
865 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
866 {
867 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
868 	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
869 
870 	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
871 		/* Mapping covers data later in the subflow stream,
872 		 * currently unsupported.
873 		 */
874 		dbg_bad_map(subflow, ssn);
875 		return false;
876 	}
877 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
878 				  subflow->map_data_len))) {
879 		/* Mapping covers only past subflow data, invalid */
880 		dbg_bad_map(subflow, ssn);
881 		return false;
882 	}
883 	return true;
884 }
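/* For instance (illustrative numbers): with map_subflow_seq = 1000 and
 * map_data_len = 500, only ssn values in [1000, 1500) validate; ssn 900
 * would mean the mapping starts later in the stream, while ssn 1500 would
 * mean it covers only already-consumed data.
 */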
885 
886 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
887 					      bool csum_reqd)
888 {
889 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
890 	u32 offset, seq, delta;
891 	u16 csum;
892 	int len;
893 
894 	if (!csum_reqd)
895 		return MAPPING_OK;
896 
897 	/* mapping already validated on previous traversal */
898 	if (subflow->map_csum_len == subflow->map_data_len)
899 		return MAPPING_OK;
900 
901 	/* traverse the receive queue, ensuring it contains a full
902 	 * DSS mapping and accumulating the related csum.
903 	 * Preserve the accumulated csum across multiple calls, to compute
904 	 * the csum only once
905 	 */
906 	delta = subflow->map_data_len - subflow->map_csum_len;
907 	for (;;) {
908 		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
909 		offset = seq - TCP_SKB_CB(skb)->seq;
910 
911 		/* if the current skb has not been accounted yet, csum its contents
912 		 * up to the amount covered by the current DSS
913 		 */
914 		if (offset < skb->len) {
915 			__wsum csum;
916 
917 			len = min(skb->len - offset, delta);
918 			csum = skb_checksum(skb, offset, len, 0);
919 			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
920 								subflow->map_csum_len);
921 
922 			delta -= len;
923 			subflow->map_csum_len += len;
924 		}
925 		if (delta == 0)
926 			break;
927 
928 		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
929 			/* if this subflow is closed, the partial mapping
930 			 * will never be completed; flush the pending skbs, so
931 			 * that subflow_sched_work_if_closed() can kick in
932 			 */
933 			if (unlikely(ssk->sk_state == TCP_CLOSE))
934 				while ((skb = skb_peek(&ssk->sk_receive_queue)))
935 					sk_eat_skb(ssk, skb);
936 
937 			/* not enough data to validate the csum */
938 			return MAPPING_EMPTY;
939 		}
940 
941 		/* the DSS mapping for next skbs will be validated later,
942 		 * when a get_mapping_status call will process such skb
943 		 */
944 		skb = skb->next;
945 	}
946 
947 	/* note that 'map_data_len' accounts only for the carried data and does
948 	 * not include the eventual seq increment due to the data fin,
949 	 * while the pseudo-header requires the original DSS data len,
950 	 * including that
951 	 */
952 	csum = __mptcp_make_csum(subflow->map_seq,
953 				 subflow->map_subflow_seq,
954 				 subflow->map_data_len + subflow->map_data_fin,
955 				 subflow->map_data_csum);
956 	if (unlikely(csum)) {
957 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
958 		subflow->send_mp_fail = 1;
959 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
960 		return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
961 	}
962 
963 	return MAPPING_OK;
964 }
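/* The DSS checksum is the standard Internet one's complement sum, per
 * RFC 8684 computed over the payload plus a pseudo-header built from the
 * data sequence number, the subflow sequence number and the data-level
 * length; __mptcp_make_csum() folds in the csum accumulated above.
 */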
965 
966 static enum mapping_status get_mapping_status(struct sock *ssk,
967 					      struct mptcp_sock *msk)
968 {
969 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
970 	bool csum_reqd = READ_ONCE(msk->csum_enabled);
971 	struct sock *sk = (struct sock *)msk;
972 	struct mptcp_ext *mpext;
973 	struct sk_buff *skb;
974 	u16 data_len;
975 	u64 map_seq;
976 
977 	skb = skb_peek(&ssk->sk_receive_queue);
978 	if (!skb)
979 		return MAPPING_EMPTY;
980 
981 	if (mptcp_check_fallback(ssk))
982 		return MAPPING_DUMMY;
983 
984 	mpext = mptcp_get_ext(skb);
985 	if (!mpext || !mpext->use_map) {
986 		if (!subflow->map_valid && !skb->len) {
987 			/* the TCP stack delivers 0-len FIN pkts to the receive
988 			 * queue; those are the only 0-len pkts ever expected here,
989 			 * and we can admit no mapping only for 0-len pkts
990 			 */
991 			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
992 				WARN_ONCE(1, "0len seq %d:%d flags %x",
993 					  TCP_SKB_CB(skb)->seq,
994 					  TCP_SKB_CB(skb)->end_seq,
995 					  TCP_SKB_CB(skb)->tcp_flags);
996 			sk_eat_skb(ssk, skb);
997 			return MAPPING_EMPTY;
998 		}
999 
1000 		if (!subflow->map_valid)
1001 			return MAPPING_INVALID;
1002 
1003 		goto validate_seq;
1004 	}
1005 
1006 	trace_get_mapping_status(mpext);
1007 
1008 	data_len = mpext->data_len;
1009 	if (data_len == 0) {
1010 		pr_debug("infinite mapping received");
1011 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1012 		subflow->map_data_len = 0;
1013 		if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
1014 			mptcp_data_lock(sk);
1015 			if (inet_sk_state_load(sk) != TCP_CLOSE)
1016 				sk_stop_timer(sk, &sk->sk_timer);
1017 			mptcp_data_unlock(sk);
1018 		}
1019 		return MAPPING_INVALID;
1020 	}
1021 
1022 	if (mpext->data_fin == 1) {
1023 		if (data_len == 1) {
1024 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1025 								 mpext->dsn64);
1026 			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
1027 			if (subflow->map_valid) {
1028 				/* A DATA_FIN might arrive in a DSS
1029 				 * option before the previous mapping
1030 				 * has been fully consumed. Continue
1031 				 * handling the existing mapping.
1032 				 */
1033 				skb_ext_del(skb, SKB_EXT_MPTCP);
1034 				return MAPPING_OK;
1035 			} else {
1036 				if (updated && schedule_work(&msk->work))
1037 					sock_hold((struct sock *)msk);
1038 
1039 				return MAPPING_DATA_FIN;
1040 			}
1041 		} else {
1042 			u64 data_fin_seq = mpext->data_seq + data_len - 1;
1043 
1044 			/* If mpext->data_seq is a 32-bit value, data_fin_seq
1045 			 * must also be limited to 32 bits.
1046 			 */
1047 			if (!mpext->dsn64)
1048 				data_fin_seq &= GENMASK_ULL(31, 0);
1049 
1050 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1051 			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
1052 				 data_fin_seq, mpext->dsn64);
1053 		}
1054 
1055 		/* Adjust for DATA_FIN using 1 byte of sequence space */
1056 		data_len--;
1057 	}
1058 
1059 	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
1060 	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
1061 
1062 	if (subflow->map_valid) {
1063 		/* Allow replacing only with an identical map */
1064 		if (subflow->map_seq == map_seq &&
1065 		    subflow->map_subflow_seq == mpext->subflow_seq &&
1066 		    subflow->map_data_len == data_len &&
1067 		    subflow->map_csum_reqd == mpext->csum_reqd) {
1068 			skb_ext_del(skb, SKB_EXT_MPTCP);
1069 			goto validate_csum;
1070 		}
1071 
1072 		/* If this skb data are fully covered by the current mapping,
1073 		 * the new map would need caching, which is not supported
1074 		 */
1075 		if (skb_is_fully_mapped(ssk, skb)) {
1076 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1077 			return MAPPING_INVALID;
1078 		}
1079 
1080 		/* will validate the next map after consuming the current one */
1081 		goto validate_csum;
1082 	}
1083 
1084 	subflow->map_seq = map_seq;
1085 	subflow->map_subflow_seq = mpext->subflow_seq;
1086 	subflow->map_data_len = data_len;
1087 	subflow->map_valid = 1;
1088 	subflow->map_data_fin = mpext->data_fin;
1089 	subflow->mpc_map = mpext->mpc_map;
1090 	subflow->map_csum_reqd = mpext->csum_reqd;
1091 	subflow->map_csum_len = 0;
1092 	subflow->map_data_csum = csum_unfold(mpext->csum);
1093 
1094 	/* cf. RFC 8684 Section 3.3 */
1095 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
1096 		return MAPPING_INVALID;
1097 
1098 	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
1099 		 subflow->map_seq, subflow->map_subflow_seq,
1100 		 subflow->map_data_len, subflow->map_csum_reqd,
1101 		 subflow->map_data_csum);
1102 
1103 validate_seq:
1104 	/* we revalidate a valid mapping on each new skb, because we must ensure
1105 	 * the current skb is completely covered by the available mapping
1106 	 */
1107 	if (!validate_mapping(ssk, skb)) {
1108 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1109 		return MAPPING_INVALID;
1110 	}
1111 
1112 	skb_ext_del(skb, SKB_EXT_MPTCP);
1113 
1114 validate_csum:
1115 	return validate_data_csum(ssk, skb, csum_reqd);
1116 }
1117 
1118 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1119 				       u64 limit)
1120 {
1121 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1122 	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
1123 	u32 incr;
1124 
1125 	incr = limit >= skb->len ? skb->len + fin : limit;
1126 
1127 	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
1128 		 subflow->map_subflow_seq);
1129 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1130 	tcp_sk(ssk)->copied_seq += incr;
1131 	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1132 		sk_eat_skb(ssk, skb);
1133 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
1134 		subflow->map_valid = 0;
1135 }
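/* e.g. if the MPTCP-level ack_seq is already 100 bytes past this mapping's
 * DSN, the caller passes limit = 100 and the first 100 bytes are consumed
 * as duplicate data, without being propagated to the msk.
 */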
1136 
1137 /* schedule the mptcp worker to remove the subflow if no more data is pending */
1138 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1139 {
1140 	struct sock *sk = (struct sock *)msk;
1141 
1142 	if (likely(ssk->sk_state != TCP_CLOSE))
1143 		return;
1144 
1145 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
1146 	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
1147 		sock_hold(sk);
1148 		if (!schedule_work(&msk->work))
1149 			sock_put(sk);
1150 	}
1151 }
1152 
1153 static bool subflow_check_data_avail(struct sock *ssk)
1154 {
1155 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1156 	enum mapping_status status;
1157 	struct mptcp_sock *msk;
1158 	struct sk_buff *skb;
1159 
1160 	if (!skb_peek(&ssk->sk_receive_queue))
1161 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1162 	if (subflow->data_avail)
1163 		return true;
1164 
1165 	msk = mptcp_sk(subflow->conn);
1166 	for (;;) {
1167 		u64 ack_seq;
1168 		u64 old_ack;
1169 
1170 		status = get_mapping_status(ssk, msk);
1171 		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1172 		if (unlikely(status == MAPPING_INVALID))
1173 			goto fallback;
1174 
1175 		if (unlikely(status == MAPPING_DUMMY))
1176 			goto fallback;
1177 
1178 		if (status != MAPPING_OK)
1179 			goto no_data;
1180 
1181 		skb = skb_peek(&ssk->sk_receive_queue);
1182 		if (WARN_ON_ONCE(!skb))
1183 			goto no_data;
1184 
1185 		/* if msk lacks the remote key, this subflow must provide an
1186 		 * MP_CAPABLE-based mapping
1187 		 */
1188 		if (unlikely(!READ_ONCE(msk->can_ack))) {
1189 			if (!subflow->mpc_map)
1190 				goto fallback;
1191 			WRITE_ONCE(msk->remote_key, subflow->remote_key);
1192 			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
1193 			WRITE_ONCE(msk->can_ack, true);
1194 		}
1195 
1196 		old_ack = READ_ONCE(msk->ack_seq);
1197 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
1198 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1199 			 ack_seq);
1200 		if (unlikely(before64(ack_seq, old_ack))) {
1201 			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1202 			continue;
1203 		}
1204 
1205 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1206 		break;
1207 	}
1208 	return true;
1209 
1210 no_data:
1211 	subflow_sched_work_if_closed(msk, ssk);
1212 	return false;
1213 
1214 fallback:
1215 	if (!__mptcp_check_fallback(msk)) {
1216 		/* RFC 8684 section 3.7. */
1217 		if (subflow->send_mp_fail) {
1218 			if (mptcp_has_another_subflow(ssk) ||
1219 			    !READ_ONCE(msk->allow_infinite_fallback)) {
1220 				ssk->sk_err = EBADMSG;
1221 				tcp_set_state(ssk, TCP_CLOSE);
1222 				subflow->reset_transient = 0;
1223 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
1224 				tcp_send_active_reset(ssk, GFP_ATOMIC);
1225 				while ((skb = skb_peek(&ssk->sk_receive_queue)))
1226 					sk_eat_skb(ssk, skb);
1227 			} else {
1228 				WRITE_ONCE(subflow->mp_fail_response_expect, true);
1229 				/* The data lock is acquired in __mptcp_move_skbs() */
1230 				sk_reset_timer((struct sock *)msk,
1231 					       &((struct sock *)msk)->sk_timer,
1232 					       jiffies + TCP_RTO_MAX);
1233 			}
1234 			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1235 			return true;
1236 		}
1237 
1238 		if ((subflow->mp_join || subflow->fully_established) && subflow->map_data_len) {
1239 			/* fatal protocol error, close the socket.
1240 			 * subflow_error_report() will introduce the appropriate barriers
1241 			 */
1242 			ssk->sk_err = EBADMSG;
1243 			tcp_set_state(ssk, TCP_CLOSE);
1244 			subflow->reset_transient = 0;
1245 			subflow->reset_reason = MPTCP_RST_EMPTCP;
1246 			tcp_send_active_reset(ssk, GFP_ATOMIC);
1247 			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1248 			return false;
1249 		}
1250 
1251 		__mptcp_do_fallback(msk);
1252 	}
1253 
1254 	skb = skb_peek(&ssk->sk_receive_queue);
1255 	subflow->map_valid = 1;
1256 	subflow->map_seq = READ_ONCE(msk->ack_seq);
1257 	subflow->map_data_len = skb->len;
1258 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1259 	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1260 	return true;
1261 }
1262 
1263 bool mptcp_subflow_data_available(struct sock *sk)
1264 {
1265 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1266 
1267 	/* check if current mapping is still valid */
1268 	if (subflow->map_valid &&
1269 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1270 		subflow->map_valid = 0;
1271 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1272 
1273 		pr_debug("Done with mapping: seq=%u data_len=%u",
1274 			 subflow->map_subflow_seq,
1275 			 subflow->map_data_len);
1276 	}
1277 
1278 	return subflow_check_data_avail(sk);
1279 }
1280 
1281 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1282  * not the ssk one.
1283  *
1284  * In mptcp, rwin is about the mptcp-level connection data.
1285  *
1286  * Data that is still on the ssk rx queue can thus be ignored,
1287  * as far as the mptcp peer is concerned that data is still in flight.
1288  * DSS ACK is updated when skb is moved to the mptcp rx queue.
1289  */
1290 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1291 {
1292 	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1293 	const struct sock *sk = subflow->conn;
1294 
1295 	*space = __mptcp_space(sk);
1296 	*full_space = tcp_full_space(sk);
1297 }
1298 
1299 void __mptcp_error_report(struct sock *sk)
1300 {
1301 	struct mptcp_subflow_context *subflow;
1302 	struct mptcp_sock *msk = mptcp_sk(sk);
1303 
1304 	mptcp_for_each_subflow(msk, subflow) {
1305 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1306 		int err = sock_error(ssk);
1307 
1308 		if (!err)
1309 			continue;
1310 
1311 		/* only propagate errors on fallen-back sockets or
1312 		 * on MPC connect
1313 		 */
1314 		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
1315 			continue;
1316 
1317 		inet_sk_state_store(sk, inet_sk_state_load(ssk));
1318 		sk->sk_err = -err;
1319 
1320 		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
1321 		smp_wmb();
1322 		sk_error_report(sk);
1323 		break;
1324 	}
1325 }
1326 
1327 static void subflow_error_report(struct sock *ssk)
1328 {
1329 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1330 
1331 	mptcp_data_lock(sk);
1332 	if (!sock_owned_by_user(sk))
1333 		__mptcp_error_report(sk);
1334 	else
1335 		__set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->cb_flags);
1336 	mptcp_data_unlock(sk);
1337 }
1338 
1339 static void subflow_data_ready(struct sock *sk)
1340 {
1341 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1342 	u16 state = 1 << inet_sk_state_load(sk);
1343 	struct sock *parent = subflow->conn;
1344 	struct mptcp_sock *msk;
1345 
1346 	msk = mptcp_sk(parent);
1347 	if (state & TCPF_LISTEN) {
1348 		/* MPJ subflows are removed from the accept queue before reaching here,
1349 		 * avoid stray wakeups
1350 		 */
1351 		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1352 			return;
1353 
1354 		parent->sk_data_ready(parent);
1355 		return;
1356 	}
1357 
1358 	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1359 		     !subflow->mp_join && !(state & TCPF_CLOSE));
1360 
1361 	if (mptcp_subflow_data_available(sk))
1362 		mptcp_data_ready(parent, sk);
1363 	else if (unlikely(sk->sk_err))
1364 		subflow_error_report(sk);
1365 }
1366 
1367 static void subflow_write_space(struct sock *ssk)
1368 {
1369 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1370 
1371 	mptcp_propagate_sndbuf(sk, ssk);
1372 	mptcp_write_space(sk);
1373 }
1374 
1375 static const struct inet_connection_sock_af_ops *
1376 subflow_default_af_ops(struct sock *sk)
1377 {
1378 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1379 	if (sk->sk_family == AF_INET6)
1380 		return &subflow_v6_specific;
1381 #endif
1382 	return &subflow_specific;
1383 }
1384 
1385 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1386 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1387 {
1388 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1389 	struct inet_connection_sock *icsk = inet_csk(sk);
1390 	const struct inet_connection_sock_af_ops *target;
1391 
1392 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1393 
1394 	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1395 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1396 
1397 	if (likely(icsk->icsk_af_ops == target))
1398 		return;
1399 
1400 	subflow->icsk_af_ops = icsk->icsk_af_ops;
1401 	icsk->icsk_af_ops = target;
1402 }
1403 #endif
1404 
1405 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1406 			 struct sockaddr_storage *addr,
1407 			 unsigned short family)
1408 {
1409 	memset(addr, 0, sizeof(*addr));
1410 	addr->ss_family = family;
1411 	if (addr->ss_family == AF_INET) {
1412 		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1413 
1414 		if (info->family == AF_INET)
1415 			in_addr->sin_addr = info->addr;
1416 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1417 		else if (ipv6_addr_v4mapped(&info->addr6))
1418 			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1419 #endif
1420 		in_addr->sin_port = info->port;
1421 	}
1422 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1423 	else if (addr->ss_family == AF_INET6) {
1424 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1425 
1426 		if (info->family == AF_INET)
1427 			ipv6_addr_set_v4mapped(info->addr.s_addr,
1428 					       &in6_addr->sin6_addr);
1429 		else
1430 			in6_addr->sin6_addr = info->addr6;
1431 		in6_addr->sin6_port = info->port;
1432 	}
1433 #endif
1434 }
1435 
1436 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1437 			    const struct mptcp_addr_info *remote)
1438 {
1439 	struct mptcp_sock *msk = mptcp_sk(sk);
1440 	struct mptcp_subflow_context *subflow;
1441 	struct sockaddr_storage addr;
1442 	int remote_id = remote->id;
1443 	int local_id = loc->id;
1444 	struct socket *sf;
1445 	struct sock *ssk;
1446 	u32 remote_token;
1447 	int addrlen;
1448 	int ifindex;
1449 	u8 flags;
1450 	int err;
1451 
1452 	if (!mptcp_is_fully_established(sk))
1453 		return -ENOTCONN;
1454 
1455 	err = mptcp_subflow_create_socket(sk, &sf);
1456 	if (err)
1457 		return err;
1458 
1459 	ssk = sf->sk;
1460 	subflow = mptcp_subflow_ctx(ssk);
1461 	do {
1462 		get_random_bytes(&subflow->local_nonce, sizeof(u32));
1463 	} while (!subflow->local_nonce);
1464 
1465 	if (local_id)
1466 		subflow_set_local_id(subflow, local_id);
1467 
1468 	mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
1469 					     &flags, &ifindex);
1470 	subflow->remote_key = msk->remote_key;
1471 	subflow->local_key = msk->local_key;
1472 	subflow->token = msk->token;
1473 	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1474 
1475 	addrlen = sizeof(struct sockaddr_in);
1476 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1477 	if (addr.ss_family == AF_INET6)
1478 		addrlen = sizeof(struct sockaddr_in6);
1479 #endif
1480 	mptcp_sockopt_sync(msk, ssk);
1481 
1482 	ssk->sk_bound_dev_if = ifindex;
1483 	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1484 	if (err)
1485 		goto failed;
1486 
1487 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1488 	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1489 		 remote_token, local_id, remote_id);
1490 	subflow->remote_token = remote_token;
1491 	subflow->remote_id = remote_id;
1492 	subflow->request_join = 1;
1493 	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1494 	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1495 
1496 	sock_hold(ssk);
1497 	list_add_tail(&subflow->node, &msk->conn_list);
1498 	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1499 	if (err && err != -EINPROGRESS)
1500 		goto failed_unlink;
1501 
1502 	/* discard the subflow socket */
1503 	mptcp_sock_graft(ssk, sk->sk_socket);
1504 	iput(SOCK_INODE(sf));
1505 	WRITE_ONCE(msk->allow_infinite_fallback, false);
1506 	return err;
1507 
1508 failed_unlink:
1509 	list_del(&subflow->node);
1510 	sock_put(mptcp_subflow_tcp_sock(subflow));
1511 
1512 failed:
1513 	subflow->disposable = 1;
1514 	sock_release(sf);
1515 	return err;
1516 }
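/* Note the ownership dance on success: the subflow's struct socket is
 * discarded (iput() on its inode) and the underlying tcp sock is grafted
 * onto the msk's socket, so from here on the subflow roughly lives and
 * dies with the msk via conn_list.
 */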
1517 
1518 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1519 {
1520 #ifdef CONFIG_SOCK_CGROUP_DATA
1521 	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1522 				*child_skcd = &child->sk_cgrp_data;
1523 
1524 	/* only the additional subflows created by kworkers have to be modified */
1525 	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1526 	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
1527 #ifdef CONFIG_MEMCG
1528 		struct mem_cgroup *memcg = parent->sk_memcg;
1529 
1530 		mem_cgroup_sk_free(child);
1531 		if (memcg && css_tryget(&memcg->css))
1532 			child->sk_memcg = memcg;
1533 #endif /* CONFIG_MEMCG */
1534 
1535 		cgroup_sk_free(child_skcd);
1536 		*child_skcd = *parent_skcd;
1537 		cgroup_sk_clone(child_skcd);
1538 	}
1539 #endif /* CONFIG_SOCK_CGROUP_DATA */
1540 }
1541 
1542 static void mptcp_subflow_ops_override(struct sock *ssk)
1543 {
1544 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1545 	if (ssk->sk_prot == &tcpv6_prot)
1546 		ssk->sk_prot = &tcpv6_prot_override;
1547 	else
1548 #endif
1549 		ssk->sk_prot = &tcp_prot_override;
1550 }
1551 
1552 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1553 {
1554 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1555 	if (ssk->sk_prot == &tcpv6_prot_override)
1556 		ssk->sk_prot = &tcpv6_prot;
1557 	else
1558 #endif
1559 		ssk->sk_prot = &tcp_prot;
1560 }
1561 int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
1562 {
1563 	struct mptcp_subflow_context *subflow;
1564 	struct net *net = sock_net(sk);
1565 	struct socket *sf;
1566 	int err;
1567 
1568 	/* un-accepted server sockets can reach here - on bad configuration
1569 	 * bail early to avoid greater trouble later
1570 	 */
1571 	if (unlikely(!sk->sk_socket))
1572 		return -EINVAL;
1573 
1574 	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
1575 			       &sf);
1576 	if (err)
1577 		return err;
1578 
1579 	lock_sock(sf->sk);
1580 
1581 	/* the newly created socket has to be in the same cgroup as its parent */
1582 	mptcp_attach_cgroup(sk, sf->sk);
1583 
1584 	/* kernel sockets do not by default acquire net ref, but TCP timer
1585 	 * needs it.
1586 	 */
1587 	sf->sk->sk_net_refcnt = 1;
1588 	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1589 	sock_inuse_add(net, 1);
1590 	err = tcp_set_ulp(sf->sk, "mptcp");
1591 	release_sock(sf->sk);
1592 
1593 	if (err) {
1594 		sock_release(sf);
1595 		return err;
1596 	}
1597 
1598 	/* the newly created socket really belongs to the owning MPTCP master
1599 	 * socket, even if for additional subflows the allocation is performed
1600 	 * by a kernel workqueue. Adjust inode references, so that the
1601 	 * procfs/diag interfaces really show this one belonging to the correct
1602 	 * user.
1603 	 */
1604 	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1605 	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1606 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1607 
1608 	subflow = mptcp_subflow_ctx(sf->sk);
1609 	pr_debug("subflow=%p", subflow);
1610 
1611 	*new_sock = sf;
1612 	sock_hold(sk);
1613 	subflow->conn = sk;
1614 	mptcp_subflow_ops_override(sf->sk);
1615 
1616 	return 0;
1617 }
1618 
1619 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1620 							gfp_t priority)
1621 {
1622 	struct inet_connection_sock *icsk = inet_csk(sk);
1623 	struct mptcp_subflow_context *ctx;
1624 
1625 	ctx = kzalloc(sizeof(*ctx), priority);
1626 	if (!ctx)
1627 		return NULL;
1628 
1629 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1630 	INIT_LIST_HEAD(&ctx->node);
1631 	INIT_LIST_HEAD(&ctx->delegated_node);
1632 
1633 	pr_debug("subflow=%p", ctx);
1634 
1635 	ctx->tcp_sock = sk;
1636 
1637 	return ctx;
1638 }
1639 
1640 static void __subflow_state_change(struct sock *sk)
1641 {
1642 	struct socket_wq *wq;
1643 
1644 	rcu_read_lock();
1645 	wq = rcu_dereference(sk->sk_wq);
1646 	if (skwq_has_sleeper(wq))
1647 		wake_up_interruptible_all(&wq->wait);
1648 	rcu_read_unlock();
1649 }
1650 
1651 static bool subflow_is_done(const struct sock *sk)
1652 {
1653 	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1654 }
1655 
1656 static void subflow_state_change(struct sock *sk)
1657 {
1658 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1659 	struct sock *parent = subflow->conn;
1660 
1661 	__subflow_state_change(sk);
1662 
1663 	if (subflow_simultaneous_connect(sk)) {
1664 		mptcp_propagate_sndbuf(parent, sk);
1665 		mptcp_do_fallback(sk);
1666 		mptcp_rcv_space_init(mptcp_sk(parent), sk);
1667 		pr_fallback(mptcp_sk(parent));
1668 		subflow->conn_finished = 1;
1669 		mptcp_set_connected(parent);
1670 	}
1671 
1672 	/* as recvmsg() does not acquire the subflow socket for ssk selection,
1673 	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
1674 	 * the data available machinery here.
1675 	 */
1676 	if (mptcp_subflow_data_available(sk))
1677 		mptcp_data_ready(parent, sk);
1678 	else if (unlikely(sk->sk_err))
1679 		subflow_error_report(sk);
1680 
1681 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1682 
1683 	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
1684 	    !subflow->rx_eof && subflow_is_done(sk)) {
1685 		subflow->rx_eof = 1;
1686 		mptcp_subflow_eof(parent);
1687 	}
1688 }
1689 
1690 static int subflow_ulp_init(struct sock *sk)
1691 {
1692 	struct inet_connection_sock *icsk = inet_csk(sk);
1693 	struct mptcp_subflow_context *ctx;
1694 	struct tcp_sock *tp = tcp_sk(sk);
1695 	int err = 0;
1696 
1697 	/* disallow attaching ULP to a socket unless it has been
1698 	 * created with sock_create_kern()
1699 	 */
1700 	if (!sk->sk_kern_sock) {
1701 		err = -EOPNOTSUPP;
1702 		goto out;
1703 	}
1704 
1705 	ctx = subflow_create_ctx(sk, GFP_KERNEL);
1706 	if (!ctx) {
1707 		err = -ENOMEM;
1708 		goto out;
1709 	}
1710 
1711 	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1712 
1713 	tp->is_mptcp = 1;
1714 	ctx->icsk_af_ops = icsk->icsk_af_ops;
1715 	icsk->icsk_af_ops = subflow_default_af_ops(sk);
1716 	ctx->tcp_state_change = sk->sk_state_change;
1717 	ctx->tcp_error_report = sk->sk_error_report;
1718 
1719 	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
1720 	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
1721 
1722 	sk->sk_data_ready = subflow_data_ready;
1723 	sk->sk_write_space = subflow_write_space;
1724 	sk->sk_state_change = subflow_state_change;
1725 	sk->sk_error_report = subflow_error_report;
1726 out:
1727 	return err;
1728 }
1729 
1730 static void subflow_ulp_release(struct sock *ssk)
1731 {
1732 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1733 	bool release = true;
1734 	struct sock *sk;
1735 
1736 	if (!ctx)
1737 		return;
1738 
1739 	sk = ctx->conn;
1740 	if (sk) {
1741 		/* if the msk has been orphaned, keep the ctx
1742 		 * alive; it will be freed by __mptcp_close_ssk()
1743 		 * when the subflow is still unaccepted
1744 		 */
1745 		release = ctx->disposable || list_empty(&ctx->node);
1746 		sock_put(sk);
1747 	}
1748 
1749 	mptcp_subflow_ops_undo_override(ssk);
1750 	if (release)
1751 		kfree_rcu(ctx, rcu);
1752 }
1753 
1754 static void subflow_ulp_clone(const struct request_sock *req,
1755 			      struct sock *newsk,
1756 			      const gfp_t priority)
1757 {
1758 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1759 	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1760 	struct mptcp_subflow_context *new_ctx;
1761 
1762 	if (!tcp_rsk(req)->is_mptcp ||
1763 	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1764 		subflow_ulp_fallback(newsk, old_ctx);
1765 		return;
1766 	}
1767 
1768 	new_ctx = subflow_create_ctx(newsk, priority);
1769 	if (!new_ctx) {
1770 		subflow_ulp_fallback(newsk, old_ctx);
1771 		return;
1772 	}
1773 
1774 	new_ctx->conn_finished = 1;
1775 	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1776 	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1777 	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
1778 	new_ctx->rel_write_seq = 1;
1779 	new_ctx->tcp_sock = newsk;
1780 
1781 	if (subflow_req->mp_capable) {
1782 		/* see comments in subflow_syn_recv_sock(), MPTCP connection
1783 		 * is fully established only after we receive the remote key
1784 		 */
1785 		new_ctx->mp_capable = 1;
1786 		new_ctx->local_key = subflow_req->local_key;
1787 		new_ctx->token = subflow_req->token;
1788 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1789 		new_ctx->idsn = subflow_req->idsn;
1790 
1791 		/* this is the first subflow, id is always 0 */
1792 		new_ctx->local_id_valid = 1;
1793 	} else if (subflow_req->mp_join) {
1794 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1795 		new_ctx->mp_join = 1;
1796 		new_ctx->fully_established = 1;
1797 		new_ctx->backup = subflow_req->backup;
1798 		new_ctx->remote_id = subflow_req->remote_id;
1799 		new_ctx->token = subflow_req->token;
1800 		new_ctx->thmac = subflow_req->thmac;
1801 
1802 		/* the subflow req id is valid, fetched via subflow_check_req()
1803 		 * and subflow_token_join_request()
1804 		 */
1805 		subflow_set_local_id(new_ctx, subflow_req->local_id);
1806 	}
1807 }
1808 
1809 static void tcp_release_cb_override(struct sock *ssk)
1810 {
1811 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1812 
1813 	if (mptcp_subflow_has_delegated_action(subflow))
1814 		mptcp_subflow_process_delegated(ssk);
1815 
1816 	tcp_release_cb(ssk);
1817 }
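/* The sk_prot override installed by mptcp_subflow_ops_override() routes the
 * subflow's release_cb here, so delegated actions queued while the subflow
 * socket was owned are processed as soon as its lock is released.
 */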
1818 
1819 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
1820 	.name		= "mptcp",
1821 	.owner		= THIS_MODULE,
1822 	.init		= subflow_ulp_init,
1823 	.release	= subflow_ulp_release,
1824 	.clone		= subflow_ulp_clone,
1825 };
1826 
1827 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
1828 {
1829 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
1830 	subflow_ops->slab_name = "request_sock_subflow";
1831 
1832 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
1833 					      subflow_ops->obj_size, 0,
1834 					      SLAB_ACCOUNT |
1835 					      SLAB_TYPESAFE_BY_RCU,
1836 					      NULL);
1837 	if (!subflow_ops->slab)
1838 		return -ENOMEM;
1839 
1840 	subflow_ops->destructor = subflow_req_destructor;
1841 
1842 	return 0;
1843 }
1844 
1845 void __init mptcp_subflow_init(void)
1846 {
1847 	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
1848 	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
1849 		panic("MPTCP: failed to init subflow request sock ops\n");
1850 
1851 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
1852 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
1853 
1854 	subflow_specific = ipv4_specific;
1855 	subflow_specific.conn_request = subflow_v4_conn_request;
1856 	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
1857 	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
1858 	subflow_specific.rebuild_header = subflow_rebuild_header;
1859 
1860 	tcp_prot_override = tcp_prot;
1861 	tcp_prot_override.release_cb = tcp_release_cb_override;
1862 
1863 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1864 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
1865 	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
1866 
1867 	subflow_v6_specific = ipv6_specific;
1868 	subflow_v6_specific.conn_request = subflow_v6_conn_request;
1869 	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
1870 	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
1871 	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
1872 
1873 	subflow_v6m_specific = subflow_v6_specific;
1874 	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
1875 	subflow_v6m_specific.send_check = ipv4_specific.send_check;
1876 	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
1877 	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
1878 	subflow_v6m_specific.net_frag_header_len = 0;
1879 	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
1880 
1881 	tcpv6_prot_override = tcpv6_prot;
1882 	tcpv6_prot_override.release_cb = tcp_release_cb_override;
1883 #endif
1884 
1885 	mptcp_diag_subflow_init(&subflow_ulp_ops);
1886 
1887 	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
1888 		panic("MPTCP: failed to register subflows to ULP\n");
1889 }
1890