xref: /linux/net/mptcp/subflow.c (revision 2bd87951de659df3381ce083342aaf5b1ea24689)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6 
7 #define pr_fmt(fmt) "MPTCP: " fmt
8 
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/sha2.h>
13 #include <crypto/utils.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
19 #include <net/ip6_route.h>
20 #include <net/transp_v6.h>
21 #endif
22 #include <net/mptcp.h>
23 #include "protocol.h"
24 #include "mib.h"
25 
26 #include <trace/events/mptcp.h>
27 #include <trace/events/sock.h>
28 
29 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
30 
31 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
32 				  enum linux_mptcp_mib_field field)
33 {
34 	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
35 }
36 
37 static void subflow_req_destructor(struct request_sock *req)
38 {
39 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
40 
41 	pr_debug("subflow_req=%p", subflow_req);
42 
43 	if (subflow_req->msk)
44 		sock_put((struct sock *)subflow_req->msk);
45 
46 	mptcp_token_destroy_request(req);
47 }
48 
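/* Pack the two 32-bit nonces into an 8-byte message and compute the
 * HMAC-SHA256 keyed with the (key1, key2) pair; callers truncate the
 * 32-byte digest as required by the MP_JOIN handshake.
 */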
49 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
50 				  void *hmac)
51 {
52 	u8 msg[8];
53 
54 	put_unaligned_be32(nonce1, &msg[0]);
55 	put_unaligned_be32(nonce2, &msg[4]);
56 
57 	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
58 }
59 
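/* A new subflow is accepted only once the MPTCP connection is fully
 * established and either the path manager still allows more subflows
 * or an active userspace PM is in charge.
 */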
60 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
61 {
62 	return mptcp_is_fully_established((void *)msk) &&
63 		((mptcp_pm_is_userspace(msk) &&
64 		  mptcp_userspace_pm_active(msk)) ||
65 		 READ_ONCE(msk->pm.accept_subflow));
66 }
67 
68 /* validate received token and create truncated hmac and nonce for SYN-ACK */
69 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
70 {
71 	struct mptcp_sock *msk = subflow_req->msk;
72 	u8 hmac[SHA256_DIGEST_SIZE];
73 
74 	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
75 
76 	subflow_generate_hmac(READ_ONCE(msk->local_key),
77 			      READ_ONCE(msk->remote_key),
78 			      subflow_req->local_nonce,
79 			      subflow_req->remote_nonce, hmac);
80 
81 	subflow_req->thmac = get_unaligned_be64(hmac);
82 }
83 
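/* Look up the msk owning the token carried by the MP_JOIN SYN and pick a
 * local address id for the new subflow; the returned msk holds an extra
 * reference, released later via subflow_req->msk.
 */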
84 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
85 {
86 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
87 	struct mptcp_sock *msk;
88 	int local_id;
89 
90 	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
91 	if (!msk) {
92 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
93 		return NULL;
94 	}
95 
96 	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
97 	if (local_id < 0) {
98 		sock_put((struct sock *)msk);
99 		return NULL;
100 	}
101 	subflow_req->local_id = local_id;
102 
103 	return msk;
104 }
105 
106 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
107 {
108 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
109 
110 	subflow_req->mp_capable = 0;
111 	subflow_req->mp_join = 0;
112 	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
113 	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
114 	subflow_req->msk = NULL;
115 	mptcp_token_init_request(req);
116 }
117 
118 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
119 {
120 	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
121 }
122 
123 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
124 {
125 	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
126 
127 	if (mpext) {
128 		memset(mpext, 0, sizeof(*mpext));
129 		mpext->reset_reason = reason;
130 	}
131 }
132 
133 /* Init mptcp request socket.
134  *
135  * Returns an error code if a JOIN has failed and a TCP reset
136  * should be sent.
137  */
138 static int subflow_check_req(struct request_sock *req,
139 			     const struct sock *sk_listener,
140 			     struct sk_buff *skb)
141 {
142 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
143 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
144 	struct mptcp_options_received mp_opt;
145 	bool opt_mp_capable, opt_mp_join;
146 
147 	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
148 
149 #ifdef CONFIG_TCP_MD5SIG
150 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
151 	 * TCP option space.
152 	 */
153 	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) {
154 		subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
155 		return -EINVAL;
156 	}
157 #endif
158 
159 	mptcp_get_options(skb, &mp_opt);
160 
161 	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
162 	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
163 	if (opt_mp_capable) {
164 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
165 
166 		if (opt_mp_join)
167 			return 0;
168 	} else if (opt_mp_join) {
169 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
170 	}
171 
172 	if (opt_mp_capable && listener->request_mptcp) {
173 		int err, retries = MPTCP_TOKEN_MAX_RETRIES;
174 
175 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
176 again:
177 		do {
178 			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
179 		} while (subflow_req->local_key == 0);
180 
181 		if (unlikely(req->syncookie)) {
182 			mptcp_crypto_key_sha(subflow_req->local_key,
183 					     &subflow_req->token,
184 					     &subflow_req->idsn);
185 			if (mptcp_token_exists(subflow_req->token)) {
186 				if (retries-- > 0)
187 					goto again;
188 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
189 			} else {
190 				subflow_req->mp_capable = 1;
191 			}
192 			return 0;
193 		}
194 
195 		err = mptcp_token_new_request(req);
196 		if (err == 0)
197 			subflow_req->mp_capable = 1;
198 		else if (retries-- > 0)
199 			goto again;
200 		else
201 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
202 
203 	} else if (opt_mp_join && listener->request_mptcp) {
204 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
205 		subflow_req->mp_join = 1;
206 		subflow_req->backup = mp_opt.backup;
207 		subflow_req->remote_id = mp_opt.join_id;
208 		subflow_req->token = mp_opt.token;
209 		subflow_req->remote_nonce = mp_opt.nonce;
210 		subflow_req->msk = subflow_token_join_request(req);
211 
212 		/* Can't fall back to TCP in this case. */
213 		if (!subflow_req->msk) {
214 			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
215 			return -EPERM;
216 		}
217 
218 		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
219 			pr_debug("syn inet_sport=%d %d",
220 				 ntohs(inet_sk(sk_listener)->inet_sport),
221 				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
222 			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
223 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
224 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
225 				return -EPERM;
226 			}
227 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
228 		}
229 
230 		subflow_req_create_thmac(subflow_req);
231 
232 		if (unlikely(req->syncookie)) {
233 			if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
234 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
235 				return -EPERM;
236 			}
237 
238 			subflow_init_req_cookie_join_save(subflow_req, skb);
239 		}
240 
241 		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
242 			 subflow_req->remote_nonce, subflow_req->msk);
243 	}
244 
245 	return 0;
246 }
247 
248 int mptcp_subflow_init_cookie_req(struct request_sock *req,
249 				  const struct sock *sk_listener,
250 				  struct sk_buff *skb)
251 {
252 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
253 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
254 	struct mptcp_options_received mp_opt;
255 	bool opt_mp_capable, opt_mp_join;
256 	int err;
257 
258 	subflow_init_req(req, sk_listener);
259 	mptcp_get_options(skb, &mp_opt);
260 
261 	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
262 	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
263 	if (opt_mp_capable && opt_mp_join)
264 		return -EINVAL;
265 
266 	if (opt_mp_capable && listener->request_mptcp) {
267 		if (mp_opt.sndr_key == 0)
268 			return -EINVAL;
269 
270 		subflow_req->local_key = mp_opt.rcvr_key;
271 		err = mptcp_token_new_request(req);
272 		if (err)
273 			return err;
274 
275 		subflow_req->mp_capable = 1;
276 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
277 	} else if (opt_mp_join && listener->request_mptcp) {
278 		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
279 			return -EINVAL;
280 
281 		subflow_req->mp_join = 1;
282 		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
283 	}
284 
285 	return 0;
286 }
287 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
288 
289 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
290 					      struct sk_buff *skb,
291 					      struct flowi *fl,
292 					      struct request_sock *req,
293 					      u32 tw_isn)
294 {
295 	struct dst_entry *dst;
296 	int err;
297 
298 	tcp_rsk(req)->is_mptcp = 1;
299 	subflow_init_req(req, sk);
300 
301 	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn);
302 	if (!dst)
303 		return NULL;
304 
305 	err = subflow_check_req(req, sk, skb);
306 	if (err == 0)
307 		return dst;
308 
309 	dst_release(dst);
310 	if (!req->syncookie)
311 		tcp_request_sock_ops.send_reset(sk, skb);
312 	return NULL;
313 }
314 
315 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
316 				struct tcp_fastopen_cookie *foc,
317 				enum tcp_synack_type synack_type)
318 {
319 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
320 	struct inet_request_sock *ireq = inet_rsk(req);
321 
322 	/* clear tstamp_ok, as needed depending on cookie */
323 	if (foc && foc->len > -1)
324 		ireq->tstamp_ok = 0;
325 
326 	if (synack_type == TCP_SYNACK_FASTOPEN)
327 		mptcp_fastopen_subflow_synack_set_params(subflow, req);
328 }
329 
330 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
331 				  struct flowi *fl,
332 				  struct request_sock *req,
333 				  struct tcp_fastopen_cookie *foc,
334 				  enum tcp_synack_type synack_type,
335 				  struct sk_buff *syn_skb)
336 {
337 	subflow_prep_synack(sk, req, foc, synack_type);
338 
339 	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
340 						     synack_type, syn_skb);
341 }
342 
343 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
344 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
345 				  struct flowi *fl,
346 				  struct request_sock *req,
347 				  struct tcp_fastopen_cookie *foc,
348 				  enum tcp_synack_type synack_type,
349 				  struct sk_buff *syn_skb)
350 {
351 	subflow_prep_synack(sk, req, foc, synack_type);
352 
353 	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
354 						     synack_type, syn_skb);
355 }
356 
357 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
358 					      struct sk_buff *skb,
359 					      struct flowi *fl,
360 					      struct request_sock *req,
361 					      u32 tw_isn)
362 {
363 	struct dst_entry *dst;
364 	int err;
365 
366 	tcp_rsk(req)->is_mptcp = 1;
367 	subflow_init_req(req, sk);
368 
369 	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn);
370 	if (!dst)
371 		return NULL;
372 
373 	err = subflow_check_req(req, sk, skb);
374 	if (err == 0)
375 		return dst;
376 
377 	dst_release(dst);
378 	if (!req->syncookie)
379 		tcp6_request_sock_ops.send_reset(sk, skb);
380 	return NULL;
381 }
382 #endif
383 
384 /* validate received truncated hmac and create hmac for third ACK */
385 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
386 {
387 	u8 hmac[SHA256_DIGEST_SIZE];
388 	u64 thmac;
389 
390 	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
391 			      subflow->remote_nonce, subflow->local_nonce,
392 			      hmac);
393 
394 	thmac = get_unaligned_be64(hmac);
395 	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
396 		 subflow, subflow->token, thmac, subflow->thmac);
397 
398 	return thmac == subflow->thmac;
399 }
400 
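/* Abort the subflow with an active TCP reset and let the MPTCP worker
 * dispose of it; a no-op if the subflow is already closed.
 */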
401 void mptcp_subflow_reset(struct sock *ssk)
402 {
403 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
404 	struct sock *sk = subflow->conn;
405 
406 	/* mptcp_mp_fail_no_response() can reach here on an already closed
407 	 * socket
408 	 */
409 	if (ssk->sk_state == TCP_CLOSE)
410 		return;
411 
412 	/* must hold: tcp_done() could drop last reference on parent */
413 	sock_hold(sk);
414 
415 	tcp_send_active_reset(ssk, GFP_ATOMIC);
416 	tcp_done(ssk);
417 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
418 		mptcp_schedule_work(sk);
419 
420 	sock_put(sk);
421 }
422 
423 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
424 {
425 	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
426 }
427 
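/* Propagate the state of the first subflow to the msk: sync the send buffer
 * and receive space and, when the handshake just completed (TCP_SYN_SENT),
 * initialize the MPTCP-level write sequence before moving the msk to the
 * new state.
 */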
428 void __mptcp_sync_state(struct sock *sk, int state)
429 {
430 	struct mptcp_subflow_context *subflow;
431 	struct mptcp_sock *msk = mptcp_sk(sk);
432 	struct sock *ssk = msk->first;
433 
434 	subflow = mptcp_subflow_ctx(ssk);
435 	__mptcp_propagate_sndbuf(sk, ssk);
436 	if (!msk->rcvspace_init)
437 		mptcp_rcv_space_init(msk, ssk);
438 
439 	if (sk->sk_state == TCP_SYN_SENT) {
440 		/* subflow->idsn is always available in TCP_SYN_SENT state,
441 		 * even for the FASTOPEN scenarios
442 		 */
443 		WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
444 		WRITE_ONCE(msk->snd_nxt, msk->write_seq);
445 		mptcp_set_state(sk, state);
446 		sk->sk_state_change(sk);
447 	}
448 }
449 
450 static void subflow_set_remote_key(struct mptcp_sock *msk,
451 				   struct mptcp_subflow_context *subflow,
452 				   const struct mptcp_options_received *mp_opt)
453 {
454 	/* active MPC subflow will reach here multiple times:
455 	 * at subflow_finish_connect() time and at 4th ack time
456 	 */
457 	if (subflow->remote_key_valid)
458 		return;
459 
460 	subflow->remote_key_valid = 1;
461 	subflow->remote_key = mp_opt->sndr_key;
462 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
463 	subflow->iasn++;
464 
465 	WRITE_ONCE(msk->remote_key, subflow->remote_key);
466 	WRITE_ONCE(msk->ack_seq, subflow->iasn);
467 	WRITE_ONCE(msk->can_ack, true);
468 	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
469 }
470 
471 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
472 				  struct mptcp_subflow_context *subflow,
473 				  const struct mptcp_options_received *mp_opt)
474 {
475 	struct mptcp_sock *msk = mptcp_sk(sk);
476 
477 	mptcp_data_lock(sk);
478 	if (mp_opt) {
479 		/* Options are available only in the non-fallback cases;
480 		 * avoid updating rx path fields otherwise
481 		 */
482 		WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
483 		WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
484 		subflow_set_remote_key(msk, subflow, mp_opt);
485 	}
486 
487 	if (!sock_owned_by_user(sk)) {
488 		__mptcp_sync_state(sk, ssk->sk_state);
489 	} else {
490 		msk->pending_state = ssk->sk_state;
491 		__set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
492 	}
493 	mptcp_data_unlock(sk);
494 }
495 
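/* Process the SYN-ACK of an actively opened subflow: complete the MP_CAPABLE
 * or MP_JOIN handshake, or fall back to plain TCP when the peer did not echo
 * the requested options.
 */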
496 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
497 {
498 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
499 	struct mptcp_options_received mp_opt;
500 	struct sock *parent = subflow->conn;
501 	struct mptcp_sock *msk;
502 
503 	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
504 
505 	/* make sure no special action is taken on any packet other than the syn-ack */
506 	if (subflow->conn_finished)
507 		return;
508 
509 	msk = mptcp_sk(parent);
510 	subflow->rel_write_seq = 1;
511 	subflow->conn_finished = 1;
512 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
513 	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
514 
515 	mptcp_get_options(skb, &mp_opt);
516 	if (subflow->request_mptcp) {
517 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
518 			MPTCP_INC_STATS(sock_net(sk),
519 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
520 			mptcp_do_fallback(sk);
521 			pr_fallback(msk);
522 			goto fallback;
523 		}
524 
525 		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
526 			WRITE_ONCE(msk->csum_enabled, true);
527 		if (mp_opt.deny_join_id0)
528 			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
529 		subflow->mp_capable = 1;
530 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
531 		mptcp_finish_connect(sk);
532 		mptcp_propagate_state(parent, sk, subflow, &mp_opt);
533 	} else if (subflow->request_join) {
534 		u8 hmac[SHA256_DIGEST_SIZE];
535 
536 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
537 			subflow->reset_reason = MPTCP_RST_EMPTCP;
538 			goto do_reset;
539 		}
540 
541 		subflow->backup = mp_opt.backup;
542 		subflow->thmac = mp_opt.thmac;
543 		subflow->remote_nonce = mp_opt.nonce;
544 		WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
545 		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
546 			 subflow, subflow->thmac, subflow->remote_nonce,
547 			 subflow->backup);
548 
549 		if (!subflow_thmac_valid(subflow)) {
550 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
551 			subflow->reset_reason = MPTCP_RST_EMPTCP;
552 			goto do_reset;
553 		}
554 
555 		if (!mptcp_finish_join(sk))
556 			goto do_reset;
557 
558 		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
559 				      subflow->local_nonce,
560 				      subflow->remote_nonce,
561 				      hmac);
562 		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
563 
564 		subflow->mp_join = 1;
565 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
566 
567 		if (subflow_use_different_dport(msk, sk)) {
568 			pr_debug("synack inet_dport=%d %d",
569 				 ntohs(inet_sk(sk)->inet_dport),
570 				 ntohs(inet_sk(parent)->inet_dport));
571 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
572 		}
573 	} else if (mptcp_check_fallback(sk)) {
574 fallback:
575 		mptcp_propagate_state(parent, sk, subflow, NULL);
576 	}
577 	return;
578 
579 do_reset:
580 	subflow->reset_transient = 0;
581 	mptcp_subflow_reset(sk);
582 }
583 
584 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
585 {
586 	WARN_ON_ONCE(local_id < 0 || local_id > 255);
587 	WRITE_ONCE(subflow->local_id, local_id);
588 }
589 
590 static int subflow_chk_local_id(struct sock *sk)
591 {
592 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
593 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
594 	int err;
595 
596 	if (likely(subflow->local_id >= 0))
597 		return 0;
598 
599 	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
600 	if (err < 0)
601 		return err;
602 
603 	subflow_set_local_id(subflow, err);
604 	return 0;
605 }
606 
607 static int subflow_rebuild_header(struct sock *sk)
608 {
609 	int err = subflow_chk_local_id(sk);
610 
611 	if (unlikely(err < 0))
612 		return err;
613 
614 	return inet_sk_rebuild_header(sk);
615 }
616 
617 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
618 static int subflow_v6_rebuild_header(struct sock *sk)
619 {
620 	int err = subflow_chk_local_id(sk);
621 
622 	if (unlikely(err < 0))
623 		return err;
624 
625 	return inet6_sk_rebuild_header(sk);
626 }
627 #endif
628 
629 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
630 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
631 
632 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
633 {
634 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
635 
636 	pr_debug("subflow=%p", subflow);
637 
638 	/* Never answer to SYNs sent to broadcast or multicast */
639 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
640 		goto drop;
641 
642 	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
643 				&subflow_request_sock_ipv4_ops,
644 				sk, skb);
645 drop:
646 	tcp_listendrop(sk);
647 	return 0;
648 }
649 
650 static void subflow_v4_req_destructor(struct request_sock *req)
651 {
652 	subflow_req_destructor(req);
653 	tcp_request_sock_ops.destructor(req);
654 }
655 
656 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
657 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
658 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
659 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
660 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
661 static struct proto tcpv6_prot_override __ro_after_init;
662 
663 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
664 {
665 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
666 
667 	pr_debug("subflow=%p", subflow);
668 
669 	if (skb->protocol == htons(ETH_P_IP))
670 		return subflow_v4_conn_request(sk, skb);
671 
672 	if (!ipv6_unicast_destination(skb))
673 		goto drop;
674 
675 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
676 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
677 		return 0;
678 	}
679 
680 	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
681 				&subflow_request_sock_ipv6_ops, sk, skb);
682 
683 drop:
684 	tcp_listendrop(sk);
685 	return 0; /* don't send reset */
686 }
687 
688 static void subflow_v6_req_destructor(struct request_sock *req)
689 {
690 	subflow_req_destructor(req);
691 	tcp6_request_sock_ops.destructor(req);
692 }
693 #endif
694 
695 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
696 					       struct sock *sk_listener,
697 					       bool attach_listener)
698 {
699 	if (ops->family == AF_INET)
700 		ops = &mptcp_subflow_v4_request_sock_ops;
701 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
702 	else if (ops->family == AF_INET6)
703 		ops = &mptcp_subflow_v6_request_sock_ops;
704 #endif
705 
706 	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
707 }
708 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
709 
710 /* validate hmac received in third ACK */
711 static bool subflow_hmac_valid(const struct request_sock *req,
712 			       const struct mptcp_options_received *mp_opt)
713 {
714 	const struct mptcp_subflow_request_sock *subflow_req;
715 	u8 hmac[SHA256_DIGEST_SIZE];
716 	struct mptcp_sock *msk;
717 
718 	subflow_req = mptcp_subflow_rsk(req);
719 	msk = subflow_req->msk;
720 	if (!msk)
721 		return false;
722 
723 	subflow_generate_hmac(READ_ONCE(msk->remote_key),
724 			      READ_ONCE(msk->local_key),
725 			      subflow_req->remote_nonce,
726 			      subflow_req->local_nonce, hmac);
727 
728 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
729 }
730 
731 static void subflow_ulp_fallback(struct sock *sk,
732 				 struct mptcp_subflow_context *old_ctx)
733 {
734 	struct inet_connection_sock *icsk = inet_csk(sk);
735 
736 	mptcp_subflow_tcp_fallback(sk, old_ctx);
737 	icsk->icsk_ulp_ops = NULL;
738 	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
739 	tcp_sk(sk)->is_mptcp = 0;
740 
741 	mptcp_subflow_ops_undo_override(sk);
742 }
743 
744 void mptcp_subflow_drop_ctx(struct sock *ssk)
745 {
746 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
747 
748 	if (!ctx)
749 		return;
750 
751 	list_del(&mptcp_subflow_ctx(ssk)->node);
752 	if (inet_csk(ssk)->icsk_ulp_ops) {
753 		subflow_ulp_fallback(ssk, ctx);
754 		if (ctx->conn)
755 			sock_put(ctx->conn);
756 	}
757 
758 	kfree_rcu(ctx, rcu);
759 }
760 
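/* Mark both the subflow and the msk as fully established once the peer key
 * carried by the third ACK has been processed; MPTFO subflows also generate
 * the msk-level ack sequence here.
 */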
761 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
762 				       struct mptcp_subflow_context *subflow,
763 				       const struct mptcp_options_received *mp_opt)
764 {
765 	subflow_set_remote_key(msk, subflow, mp_opt);
766 	subflow->fully_established = 1;
767 	WRITE_ONCE(msk->fully_established, true);
768 
769 	if (subflow->is_mptfo)
770 		__mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
771 }
772 
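/* Create the child socket for a passively opened subflow: clone a new msk
 * for MP_CAPABLE requests, attach to the existing msk for MP_JOIN, or fall
 * back to plain TCP when the required options are missing and fallback is
 * allowed.
 */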
773 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
774 					  struct sk_buff *skb,
775 					  struct request_sock *req,
776 					  struct dst_entry *dst,
777 					  struct request_sock *req_unhash,
778 					  bool *own_req)
779 {
780 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
781 	struct mptcp_subflow_request_sock *subflow_req;
782 	struct mptcp_options_received mp_opt;
783 	bool fallback, fallback_is_fatal;
784 	struct mptcp_sock *owner;
785 	struct sock *child;
786 
787 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
788 
789 	/* After child creation we must look for MPC even when options
790 	 * are not parsed
791 	 */
792 	mp_opt.suboptions = 0;
793 
794 	/* hopefully temporary handling for MP_JOIN+syncookie */
795 	subflow_req = mptcp_subflow_rsk(req);
796 	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
797 	fallback = !tcp_rsk(req)->is_mptcp;
798 	if (fallback)
799 		goto create_child;
800 
801 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
802 	if (subflow_req->mp_capable) {
803 		/* we can receive and accept an in-window, out-of-order pkt,
804 		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
805 		 * paths: always try to extract the peer key, and fall back
806 		 * for packets missing it.
807 		 * Even OoO DSS packets arriving legitimately after dropped or
808 		 * reordered MPC will cause fallback, but we don't have other
809 		 * options.
810 		 */
811 		mptcp_get_options(skb, &mp_opt);
812 		if (!(mp_opt.suboptions &
813 		      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
814 			fallback = true;
815 
816 	} else if (subflow_req->mp_join) {
817 		mptcp_get_options(skb, &mp_opt);
818 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
819 		    !subflow_hmac_valid(req, &mp_opt) ||
820 		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
821 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
822 			fallback = true;
823 		}
824 	}
825 
826 create_child:
827 	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
828 						     req_unhash, own_req);
829 
830 	if (child && *own_req) {
831 		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
832 
833 		tcp_rsk(req)->drop_req = false;
834 
835 		/* we need to fall back on ctx allocation failure and on the
836 		 * prerequisite checks above. In the latter scenario we additionally need
837 		 * to reset the context to non MPTCP status.
838 		 */
839 		if (!ctx || fallback) {
840 			if (fallback_is_fatal) {
841 				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
842 				goto dispose_child;
843 			}
844 			goto fallback;
845 		}
846 
847 		/* ssk inherits options of listener sk */
848 		ctx->setsockopt_seq = listener->setsockopt_seq;
849 
850 		if (ctx->mp_capable) {
851 			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
852 			if (!ctx->conn)
853 				goto fallback;
854 
855 			ctx->subflow_id = 1;
856 			owner = mptcp_sk(ctx->conn);
857 			mptcp_pm_new_connection(owner, child, 1);
858 
859 			/* with OoO packets we can reach here without ingress
860 			 * mpc option
861 			 */
862 			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
863 				mptcp_pm_fully_established(owner, child);
864 				ctx->pm_notified = 1;
865 			}
866 		} else if (ctx->mp_join) {
867 			owner = subflow_req->msk;
868 			if (!owner) {
869 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
870 				goto dispose_child;
871 			}
872 
873 			/* move the msk reference ownership to the subflow */
874 			subflow_req->msk = NULL;
875 			ctx->conn = (struct sock *)owner;
876 
877 			if (subflow_use_different_sport(owner, sk)) {
878 				pr_debug("ack inet_sport=%d %d",
879 					 ntohs(inet_sk(sk)->inet_sport),
880 					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
881 				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
882 					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
883 					subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
884 					goto dispose_child;
885 				}
886 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
887 			}
888 
889 			if (!mptcp_finish_join(child)) {
890 				struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child);
891 
892 				subflow_add_reset_reason(skb, subflow->reset_reason);
893 				goto dispose_child;
894 			}
895 
896 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
897 			tcp_rsk(req)->drop_req = true;
898 		}
899 	}
900 
901 	/* check for expected invariant - should never trigger, it just helps
902 	 * catching subtle bugs earlier
903 	 */
904 	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
905 		     (!mptcp_subflow_ctx(child) ||
906 		      !mptcp_subflow_ctx(child)->conn));
907 	return child;
908 
909 dispose_child:
910 	mptcp_subflow_drop_ctx(child);
911 	tcp_rsk(req)->drop_req = true;
912 	inet_csk_prepare_for_destroy_sock(child);
913 	tcp_done(child);
914 	req->rsk_ops->send_reset(sk, skb);
915 
916 	/* The last child reference will be released by the caller */
917 	return child;
918 
919 fallback:
920 	if (fallback)
921 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
922 	mptcp_subflow_drop_ctx(child);
923 	return child;
924 }
925 
926 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
927 static struct proto tcp_prot_override __ro_after_init;
928 
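/* Outcome of the DSS mapping lookup performed on the skb at the head of the
 * subflow receive queue, see get_mapping_status() below.
 */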
929 enum mapping_status {
930 	MAPPING_OK,
931 	MAPPING_INVALID,
932 	MAPPING_EMPTY,
933 	MAPPING_DATA_FIN,
934 	MAPPING_DUMMY,
935 	MAPPING_BAD_CSUM
936 };
937 
938 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
939 {
940 	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
941 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
942 }
943 
944 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
945 {
946 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
947 	unsigned int skb_consumed;
948 
949 	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
950 	if (WARN_ON_ONCE(skb_consumed >= skb->len))
951 		return true;
952 
953 	return skb->len - skb_consumed <= subflow->map_data_len -
954 					  mptcp_subflow_get_map_offset(subflow);
955 }
956 
957 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
958 {
959 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
960 	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
961 
962 	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
963 		/* Mapping covers data later in the subflow stream,
964 		 * currently unsupported.
965 		 */
966 		dbg_bad_map(subflow, ssn);
967 		return false;
968 	}
969 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
970 				  subflow->map_data_len))) {
971 		/* Mapping only covers past subflow data, invalid */
972 		dbg_bad_map(subflow, ssn);
973 		return false;
974 	}
975 	return true;
976 }
977 
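/* Validate the DSS checksum incrementally: walk the receive queue up to the
 * end of the current mapping, accumulating the data csum, and verify it
 * against the DSS checksum once the whole mapping is available.
 */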
978 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
979 					      bool csum_reqd)
980 {
981 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
982 	u32 offset, seq, delta;
983 	__sum16 csum;
984 	int len;
985 
986 	if (!csum_reqd)
987 		return MAPPING_OK;
988 
989 	/* mapping already validated on previous traversal */
990 	if (subflow->map_csum_len == subflow->map_data_len)
991 		return MAPPING_OK;
992 
993 	/* traverse the receive queue, ensuring it contains a full
994 	 * DSS mapping and accumulating the related csum.
995 	 * Preserve the accumulated csum across multiple calls, to compute
996 	 * the csum only once
997 	 */
998 	delta = subflow->map_data_len - subflow->map_csum_len;
999 	for (;;) {
1000 		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
1001 		offset = seq - TCP_SKB_CB(skb)->seq;
1002 
1003 		/* if the current skb has not been accounted yet, csum its contents
1004 		 * up to the amount covered by the current DSS
1005 		 */
1006 		if (offset < skb->len) {
1007 			__wsum csum;
1008 
1009 			len = min(skb->len - offset, delta);
1010 			csum = skb_checksum(skb, offset, len, 0);
1011 			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
1012 								subflow->map_csum_len);
1013 
1014 			delta -= len;
1015 			subflow->map_csum_len += len;
1016 		}
1017 		if (delta == 0)
1018 			break;
1019 
1020 		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
1021 			/* if this subflow is closed, the partial mapping
1022 			 * will never be completed; flush the pending skbs, so
1023 			 * that subflow_sched_work_if_closed() can kick in
1024 			 */
1025 			if (unlikely(ssk->sk_state == TCP_CLOSE))
1026 				while ((skb = skb_peek(&ssk->sk_receive_queue)))
1027 					sk_eat_skb(ssk, skb);
1028 
1029 			/* not enough data to validate the csum */
1030 			return MAPPING_EMPTY;
1031 		}
1032 
1033 		/* the DSS mapping for next skbs will be validated later,
1034 		 * when a get_mapping_status call processes such skbs
1035 		 */
1036 		skb = skb->next;
1037 	}
1038 
1039 	/* note that 'map_data_len' accounts only for the carried data and does
1040 	 * not include the eventual seq increment due to the data fin,
1041 	 * while the pseudo header requires the original DSS data len,
1042 	 * including that
1043 	 */
1044 	csum = __mptcp_make_csum(subflow->map_seq,
1045 				 subflow->map_subflow_seq,
1046 				 subflow->map_data_len + subflow->map_data_fin,
1047 				 subflow->map_data_csum);
1048 	if (unlikely(csum)) {
1049 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
1050 		return MAPPING_BAD_CSUM;
1051 	}
1052 
1053 	subflow->valid_csum_seen = 1;
1054 	return MAPPING_OK;
1055 }
1056 
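/* Parse the MPTCP extension of the skb at the head of the receive queue and
 * install or validate the current DSS mapping for this subflow.
 */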
1057 static enum mapping_status get_mapping_status(struct sock *ssk,
1058 					      struct mptcp_sock *msk)
1059 {
1060 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1061 	bool csum_reqd = READ_ONCE(msk->csum_enabled);
1062 	struct mptcp_ext *mpext;
1063 	struct sk_buff *skb;
1064 	u16 data_len;
1065 	u64 map_seq;
1066 
1067 	skb = skb_peek(&ssk->sk_receive_queue);
1068 	if (!skb)
1069 		return MAPPING_EMPTY;
1070 
1071 	if (mptcp_check_fallback(ssk))
1072 		return MAPPING_DUMMY;
1073 
1074 	mpext = mptcp_get_ext(skb);
1075 	if (!mpext || !mpext->use_map) {
1076 		if (!subflow->map_valid && !skb->len) {
1077 			/* the TCP stack delivers 0-len FIN pkts to the receive
1078 			 * queue, those are the only 0-len pkts ever expected here,
1079 			 * and we can admit no mapping only for 0-len pkts
1080 			 */
1081 			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1082 				WARN_ONCE(1, "0len seq %d:%d flags %x",
1083 					  TCP_SKB_CB(skb)->seq,
1084 					  TCP_SKB_CB(skb)->end_seq,
1085 					  TCP_SKB_CB(skb)->tcp_flags);
1086 			sk_eat_skb(ssk, skb);
1087 			return MAPPING_EMPTY;
1088 		}
1089 
1090 		if (!subflow->map_valid)
1091 			return MAPPING_INVALID;
1092 
1093 		goto validate_seq;
1094 	}
1095 
1096 	trace_get_mapping_status(mpext);
1097 
1098 	data_len = mpext->data_len;
1099 	if (data_len == 0) {
1100 		pr_debug("infinite mapping received");
1101 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1102 		subflow->map_data_len = 0;
1103 		return MAPPING_INVALID;
1104 	}
1105 
1106 	if (mpext->data_fin == 1) {
1107 		if (data_len == 1) {
1108 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1109 								 mpext->dsn64);
1110 			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
1111 			if (subflow->map_valid) {
1112 				/* A DATA_FIN might arrive in a DSS
1113 				 * option before the previous mapping
1114 				 * has been fully consumed. Continue
1115 				 * handling the existing mapping.
1116 				 */
1117 				skb_ext_del(skb, SKB_EXT_MPTCP);
1118 				return MAPPING_OK;
1119 			} else {
1120 				if (updated)
1121 					mptcp_schedule_work((struct sock *)msk);
1122 
1123 				return MAPPING_DATA_FIN;
1124 			}
1125 		} else {
1126 			u64 data_fin_seq = mpext->data_seq + data_len - 1;
1127 
1128 			/* If mpext->data_seq is a 32-bit value, data_fin_seq
1129 			 * must also be limited to 32 bits.
1130 			 */
1131 			if (!mpext->dsn64)
1132 				data_fin_seq &= GENMASK_ULL(31, 0);
1133 
1134 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1135 			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
1136 				 data_fin_seq, mpext->dsn64);
1137 		}
1138 
1139 		/* Adjust for DATA_FIN using 1 byte of sequence space */
1140 		data_len--;
1141 	}
1142 
1143 	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
1144 	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
1145 
1146 	if (subflow->map_valid) {
1147 		/* Allow replacing only with an identical map */
1148 		if (subflow->map_seq == map_seq &&
1149 		    subflow->map_subflow_seq == mpext->subflow_seq &&
1150 		    subflow->map_data_len == data_len &&
1151 		    subflow->map_csum_reqd == mpext->csum_reqd) {
1152 			skb_ext_del(skb, SKB_EXT_MPTCP);
1153 			goto validate_csum;
1154 		}
1155 
1156 		/* If this skb's data is fully covered by the current mapping,
1157 		 * the new map would need caching, which is not supported
1158 		 */
1159 		if (skb_is_fully_mapped(ssk, skb)) {
1160 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1161 			return MAPPING_INVALID;
1162 		}
1163 
1164 		/* will validate the next map after consuming the current one */
1165 		goto validate_csum;
1166 	}
1167 
1168 	subflow->map_seq = map_seq;
1169 	subflow->map_subflow_seq = mpext->subflow_seq;
1170 	subflow->map_data_len = data_len;
1171 	subflow->map_valid = 1;
1172 	subflow->map_data_fin = mpext->data_fin;
1173 	subflow->mpc_map = mpext->mpc_map;
1174 	subflow->map_csum_reqd = mpext->csum_reqd;
1175 	subflow->map_csum_len = 0;
1176 	subflow->map_data_csum = csum_unfold(mpext->csum);
1177 
1178 	/* Cfr RFC 8684 Section 3.3.0 */
1179 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
1180 		return MAPPING_INVALID;
1181 
1182 	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
1183 		 subflow->map_seq, subflow->map_subflow_seq,
1184 		 subflow->map_data_len, subflow->map_csum_reqd,
1185 		 subflow->map_data_csum);
1186 
1187 validate_seq:
1188 	/* we revalidate the valid mapping on each new skb, because we must ensure
1189 	 * the current skb is completely covered by the available mapping
1190 	 */
1191 	if (!validate_mapping(ssk, skb)) {
1192 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1193 		return MAPPING_INVALID;
1194 	}
1195 
1196 	skb_ext_del(skb, SKB_EXT_MPTCP);
1197 
1198 validate_csum:
1199 	return validate_data_csum(ssk, skb, csum_reqd);
1200 }
1201 
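/* Drop duplicate data falling below the msk-level ack sequence: advance
 * copied_seq by up to 'limit' bytes, eating fully consumed skbs and clearing
 * the mapping once it is completely covered.
 */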
1202 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1203 				       u64 limit)
1204 {
1205 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1206 	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
1207 	u32 incr;
1208 
1209 	incr = limit >= skb->len ? skb->len + fin : limit;
1210 
1211 	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
1212 		 subflow->map_subflow_seq);
1213 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1214 	tcp_sk(ssk)->copied_seq += incr;
1215 	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1216 		sk_eat_skb(ssk, skb);
1217 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
1218 		subflow->map_valid = 0;
1219 }
1220 
1221 /* sched mptcp worker to remove the subflow if no more data is pending */
1222 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1223 {
1224 	if (likely(ssk->sk_state != TCP_CLOSE))
1225 		return;
1226 
1227 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
1228 	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1229 		mptcp_schedule_work((struct sock *)msk);
1230 }
1231 
1232 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
1233 {
1234 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1235 
1236 	if (subflow->mp_join)
1237 		return false;
1238 	else if (READ_ONCE(msk->csum_enabled))
1239 		return !subflow->valid_csum_seen;
1240 	else
1241 		return !subflow->fully_established;
1242 }
1243 
1244 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1245 {
1246 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1247 	unsigned long fail_tout;
1248 
1249 	/* graceful failure can happen only on the MPC subflow */
1250 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1251 		return;
1252 
1253 	/* since the close timeout takes precedence over the fail one,
1254 	 * no need to start the latter when the first is already set
1255 	 */
1256 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
1257 		return;
1258 
1259 	/* we don't need extreme accuracy here, use a zero fail_tout as a special
1260 	 * value meaning no fail timeout at all
1261 	 */
1262 	fail_tout = jiffies + TCP_RTO_MAX;
1263 	if (!fail_tout)
1264 		fail_tout = 1;
1265 	WRITE_ONCE(subflow->fail_tout, fail_tout);
1266 	tcp_send_ack(ssk);
1267 
1268 	mptcp_reset_tout_timer(msk, subflow->fail_tout);
1269 }
1270 
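/* Check whether the subflow receive queue holds data the msk can consume:
 * validate the DSS mappings of the queued skbs, discard data already acked
 * at the MPTCP level and, on mapping or checksum errors, either fall back
 * to plain TCP or reset the subflow.
 */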
1271 static bool subflow_check_data_avail(struct sock *ssk)
1272 {
1273 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1274 	enum mapping_status status;
1275 	struct mptcp_sock *msk;
1276 	struct sk_buff *skb;
1277 
1278 	if (!skb_peek(&ssk->sk_receive_queue))
1279 		WRITE_ONCE(subflow->data_avail, false);
1280 	if (subflow->data_avail)
1281 		return true;
1282 
1283 	msk = mptcp_sk(subflow->conn);
1284 	for (;;) {
1285 		u64 ack_seq;
1286 		u64 old_ack;
1287 
1288 		status = get_mapping_status(ssk, msk);
1289 		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1290 		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
1291 			     status == MAPPING_BAD_CSUM))
1292 			goto fallback;
1293 
1294 		if (status != MAPPING_OK)
1295 			goto no_data;
1296 
1297 		skb = skb_peek(&ssk->sk_receive_queue);
1298 		if (WARN_ON_ONCE(!skb))
1299 			goto no_data;
1300 
1301 		if (unlikely(!READ_ONCE(msk->can_ack)))
1302 			goto fallback;
1303 
1304 		old_ack = READ_ONCE(msk->ack_seq);
1305 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
1306 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1307 			 ack_seq);
1308 		if (unlikely(before64(ack_seq, old_ack))) {
1309 			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1310 			continue;
1311 		}
1312 
1313 		WRITE_ONCE(subflow->data_avail, true);
1314 		break;
1315 	}
1316 	return true;
1317 
1318 no_data:
1319 	subflow_sched_work_if_closed(msk, ssk);
1320 	return false;
1321 
1322 fallback:
1323 	if (!__mptcp_check_fallback(msk)) {
1324 		/* RFC 8684 section 3.7. */
1325 		if (status == MAPPING_BAD_CSUM &&
1326 		    (subflow->mp_join || subflow->valid_csum_seen)) {
1327 			subflow->send_mp_fail = 1;
1328 
1329 			if (!READ_ONCE(msk->allow_infinite_fallback)) {
1330 				subflow->reset_transient = 0;
1331 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
1332 				goto reset;
1333 			}
1334 			mptcp_subflow_fail(msk, ssk);
1335 			WRITE_ONCE(subflow->data_avail, true);
1336 			return true;
1337 		}
1338 
1339 		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
1340 			/* fatal protocol error, close the socket.
1341 			 * subflow_error_report() will introduce the appropriate barriers
1342 			 */
1343 			subflow->reset_transient = 0;
1344 			subflow->reset_reason = MPTCP_RST_EMPTCP;
1345 
1346 reset:
1347 			WRITE_ONCE(ssk->sk_err, EBADMSG);
1348 			tcp_set_state(ssk, TCP_CLOSE);
1349 			while ((skb = skb_peek(&ssk->sk_receive_queue)))
1350 				sk_eat_skb(ssk, skb);
1351 			tcp_send_active_reset(ssk, GFP_ATOMIC);
1352 			WRITE_ONCE(subflow->data_avail, false);
1353 			return false;
1354 		}
1355 
1356 		mptcp_do_fallback(ssk);
1357 	}
1358 
1359 	skb = skb_peek(&ssk->sk_receive_queue);
1360 	subflow->map_valid = 1;
1361 	subflow->map_seq = READ_ONCE(msk->ack_seq);
1362 	subflow->map_data_len = skb->len;
1363 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1364 	WRITE_ONCE(subflow->data_avail, true);
1365 	return true;
1366 }
1367 
1368 bool mptcp_subflow_data_available(struct sock *sk)
1369 {
1370 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1371 
1372 	/* check if current mapping is still valid */
1373 	if (subflow->map_valid &&
1374 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1375 		subflow->map_valid = 0;
1376 		WRITE_ONCE(subflow->data_avail, false);
1377 
1378 		pr_debug("Done with mapping: seq=%u data_len=%u",
1379 			 subflow->map_subflow_seq,
1380 			 subflow->map_data_len);
1381 	}
1382 
1383 	return subflow_check_data_avail(sk);
1384 }
1385 
1386 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1387  * not the ssk one.
1388  *
1389  * In mptcp, rwin is about the mptcp-level connection data.
1390  *
1391  * Data that is still on the ssk rx queue can thus be ignored,
1392  * as far as the mptcp peer is concerned, that data is still in flight.
1393  * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
1394  */
1395 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1396 {
1397 	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1398 	const struct sock *sk = subflow->conn;
1399 
1400 	*space = __mptcp_space(sk);
1401 	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1402 }
1403 
1404 static void subflow_error_report(struct sock *ssk)
1405 {
1406 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1407 
1408 	/* bail early if this is a no-op, so that we avoid introducing a
1409 	 * problematic lockdep dependency between TCP accept queue lock
1410 	 * and msk socket spinlock
1411 	 */
1412 	if (!sk->sk_socket)
1413 		return;
1414 
1415 	mptcp_data_lock(sk);
1416 	if (!sock_owned_by_user(sk))
1417 		__mptcp_error_report(sk);
1418 	else
1419 		__set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->cb_flags);
1420 	mptcp_data_unlock(sk);
1421 }
1422 
1423 static void subflow_data_ready(struct sock *sk)
1424 {
1425 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1426 	u16 state = 1 << inet_sk_state_load(sk);
1427 	struct sock *parent = subflow->conn;
1428 	struct mptcp_sock *msk;
1429 
1430 	trace_sk_data_ready(sk);
1431 
1432 	msk = mptcp_sk(parent);
1433 	if (state & TCPF_LISTEN) {
1434 		/* MPJ subflows are removed from the accept queue before reaching here;
1435 		 * avoid stray wakeups
1436 		 */
1437 		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1438 			return;
1439 
1440 		parent->sk_data_ready(parent);
1441 		return;
1442 	}
1443 
1444 	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1445 		     !subflow->mp_join && !(state & TCPF_CLOSE));
1446 
1447 	if (mptcp_subflow_data_available(sk)) {
1448 		mptcp_data_ready(parent, sk);
1449 
1450 		/* subflow-level lowat tests are not relevant;
1451 		 * respect the msk-level threshold, possibly mandating an immediate ack
1452 		 */
1453 		if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
1454 		    (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
1455 			inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
1456 	} else if (unlikely(sk->sk_err)) {
1457 		subflow_error_report(sk);
1458 	}
1459 }
1460 
1461 static void subflow_write_space(struct sock *ssk)
1462 {
1463 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1464 
1465 	mptcp_propagate_sndbuf(sk, ssk);
1466 	mptcp_write_space(sk);
1467 }
1468 
1469 static const struct inet_connection_sock_af_ops *
1470 subflow_default_af_ops(struct sock *sk)
1471 {
1472 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1473 	if (sk->sk_family == AF_INET6)
1474 		return &subflow_v6_specific;
1475 #endif
1476 	return &subflow_specific;
1477 }
1478 
1479 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1480 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1481 {
1482 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1483 	struct inet_connection_sock *icsk = inet_csk(sk);
1484 	const struct inet_connection_sock_af_ops *target;
1485 
1486 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1487 
1488 	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1489 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1490 
1491 	if (likely(icsk->icsk_af_ops == target))
1492 		return;
1493 
1494 	subflow->icsk_af_ops = icsk->icsk_af_ops;
1495 	icsk->icsk_af_ops = target;
1496 }
1497 #endif
1498 
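/* Convert an mptcp_addr_info into a sockaddr of the requested family,
 * mapping IPv4 addresses into v4-mapped IPv6 ones when needed.
 */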
1499 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1500 			 struct sockaddr_storage *addr,
1501 			 unsigned short family)
1502 {
1503 	memset(addr, 0, sizeof(*addr));
1504 	addr->ss_family = family;
1505 	if (addr->ss_family == AF_INET) {
1506 		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1507 
1508 		if (info->family == AF_INET)
1509 			in_addr->sin_addr = info->addr;
1510 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1511 		else if (ipv6_addr_v4mapped(&info->addr6))
1512 			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1513 #endif
1514 		in_addr->sin_port = info->port;
1515 	}
1516 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1517 	else if (addr->ss_family == AF_INET6) {
1518 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1519 
1520 		if (info->family == AF_INET)
1521 			ipv6_addr_set_v4mapped(info->addr.s_addr,
1522 					       &in6_addr->sin6_addr);
1523 		else
1524 			in6_addr->sin6_addr = info->addr6;
1525 		in6_addr->sin6_port = info->port;
1526 	}
1527 #endif
1528 }
1529 
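/* Create and connect a new MP_JOIN subflow from the local address 'loc'
 * towards 'remote', binding it to the configured interface and linking it
 * into the msk conn_list before the non-blocking connect is issued.
 */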
1530 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1531 			    const struct mptcp_addr_info *remote)
1532 {
1533 	struct mptcp_sock *msk = mptcp_sk(sk);
1534 	struct mptcp_subflow_context *subflow;
1535 	struct sockaddr_storage addr;
1536 	int remote_id = remote->id;
1537 	int local_id = loc->id;
1538 	int err = -ENOTCONN;
1539 	struct socket *sf;
1540 	struct sock *ssk;
1541 	u32 remote_token;
1542 	int addrlen;
1543 	int ifindex;
1544 	u8 flags;
1545 
1546 	if (!mptcp_is_fully_established(sk))
1547 		goto err_out;
1548 
1549 	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
1550 	if (err)
1551 		goto err_out;
1552 
1553 	ssk = sf->sk;
1554 	subflow = mptcp_subflow_ctx(ssk);
1555 	do {
1556 		get_random_bytes(&subflow->local_nonce, sizeof(u32));
1557 	} while (!subflow->local_nonce);
1558 
1559 	if (local_id)
1560 		subflow_set_local_id(subflow, local_id);
1561 
1562 	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1563 					     &flags, &ifindex);
1564 	subflow->remote_key_valid = 1;
1565 	subflow->remote_key = READ_ONCE(msk->remote_key);
1566 	subflow->local_key = READ_ONCE(msk->local_key);
1567 	subflow->token = msk->token;
1568 	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1569 
1570 	addrlen = sizeof(struct sockaddr_in);
1571 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1572 	if (addr.ss_family == AF_INET6)
1573 		addrlen = sizeof(struct sockaddr_in6);
1574 #endif
1575 	ssk->sk_bound_dev_if = ifindex;
1576 	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1577 	if (err)
1578 		goto failed;
1579 
1580 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1581 	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1582 		 remote_token, local_id, remote_id);
1583 	subflow->remote_token = remote_token;
1584 	WRITE_ONCE(subflow->remote_id, remote_id);
1585 	subflow->request_join = 1;
1586 	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1587 	subflow->subflow_id = msk->subflow_id++;
1588 	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1589 
1590 	sock_hold(ssk);
1591 	list_add_tail(&subflow->node, &msk->conn_list);
1592 	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1593 	if (err && err != -EINPROGRESS)
1594 		goto failed_unlink;
1595 
1596 	/* discard the subflow socket */
1597 	mptcp_sock_graft(ssk, sk->sk_socket);
1598 	iput(SOCK_INODE(sf));
1599 	WRITE_ONCE(msk->allow_infinite_fallback, false);
1600 	mptcp_stop_tout_timer(sk);
1601 	return 0;
1602 
1603 failed_unlink:
1604 	list_del(&subflow->node);
1605 	sock_put(mptcp_subflow_tcp_sock(subflow));
1606 
1607 failed:
1608 	subflow->disposable = 1;
1609 	sock_release(sf);
1610 
1611 err_out:
1612 	/* we account subflows before the creation, and these failures will not
1613 	 * be caught by sk_state_change()
1614 	 */
1615 	mptcp_pm_close_subflow(msk);
1616 	return err;
1617 }
1618 
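/* Additional subflows are created by kernel workers; move the new child
 * socket into the same cgroup (and memcg) as the msk owner.
 */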
1619 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1620 {
1621 #ifdef CONFIG_SOCK_CGROUP_DATA
1622 	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1623 				*child_skcd = &child->sk_cgrp_data;
1624 
1625 	/* only the additional subflows created by kworkers have to be modified */
1626 	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1627 	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
1628 #ifdef CONFIG_MEMCG
1629 		struct mem_cgroup *memcg = parent->sk_memcg;
1630 
1631 		mem_cgroup_sk_free(child);
1632 		if (memcg && css_tryget(&memcg->css))
1633 			child->sk_memcg = memcg;
1634 #endif /* CONFIG_MEMCG */
1635 
1636 		cgroup_sk_free(child_skcd);
1637 		*child_skcd = *parent_skcd;
1638 		cgroup_sk_clone(child_skcd);
1639 	}
1640 #endif /* CONFIG_SOCK_CGROUP_DATA */
1641 }
1642 
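/* Swap sk_prot for the MPTCP-specific override variants (tcp_prot_override /
 * tcpv6_prot_override, declared above and initialized at subflow init time);
 * reverted by mptcp_subflow_ops_undo_override() when the subflow falls back
 * to plain TCP.
 */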
1643 static void mptcp_subflow_ops_override(struct sock *ssk)
1644 {
1645 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1646 	if (ssk->sk_prot == &tcpv6_prot)
1647 		ssk->sk_prot = &tcpv6_prot_override;
1648 	else
1649 #endif
1650 		ssk->sk_prot = &tcp_prot_override;
1651 }
1652 
1653 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1654 {
1655 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1656 	if (ssk->sk_prot == &tcpv6_prot_override)
1657 		ssk->sk_prot = &tcpv6_prot;
1658 	else
1659 #endif
1660 		ssk->sk_prot = &tcp_prot;
1661 }
1662 
1663 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
1664 				struct socket **new_sock)
1665 {
1666 	struct mptcp_subflow_context *subflow;
1667 	struct net *net = sock_net(sk);
1668 	struct socket *sf;
1669 	int err;
1670 
1671 	/* un-accepted server sockets can reach here - on bad configuration
1672 	 * bail early to avoid greater trouble later
1673 	 */
1674 	if (unlikely(!sk->sk_socket))
1675 		return -EINVAL;
1676 
1677 	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
1678 	if (err)
1679 		return err;
1680 
1681 	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
1682 
1683 	err = security_mptcp_add_subflow(sk, sf->sk);
1684 	if (err)
1685 		goto err_free;
1686 
1687 	/* the newly created socket has to be in the same cgroup as its parent */
1688 	mptcp_attach_cgroup(sk, sf->sk);
1689 
1690 	/* kernel sockets do not by default acquire net ref, but TCP timer
1691 	 * needs it.
1692 	 * Update ns_tracker to current stack trace and refcounted tracker.
1693 	 */
1694 	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
1695 	sf->sk->sk_net_refcnt = 1;
1696 	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1697 	sock_inuse_add(net, 1);
1698 	err = tcp_set_ulp(sf->sk, "mptcp");
1699 	if (err)
1700 		goto err_free;
1701 
1702 	mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
1703 	release_sock(sf->sk);
1704 
1705 	/* the newly created socket really belongs to the owning MPTCP master
1706 	 * socket, even if for additional subflows the allocation is performed
1707 	 * by a kernel workqueue. Adjust inode references, so that the
1708 	 * procfs/diag interfaces really show this one belonging to the correct
1709 	 * user.
1710 	 */
1711 	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1712 	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1713 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1714 
1715 	subflow = mptcp_subflow_ctx(sf->sk);
1716 	pr_debug("subflow=%p", subflow);
1717 
1718 	*new_sock = sf;
1719 	sock_hold(sk);
1720 	subflow->conn = sk;
1721 	mptcp_subflow_ops_override(sf->sk);
1722 
1723 	return 0;
1724 
1725 err_free:
1726 	release_sock(sf->sk);
1727 	sock_release(sf);
1728 	return err;
1729 }
1730 
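/* Allocate a new subflow context and attach it to the TCP socket as ULP
 * data; the local address id stays unset (-1) until the PM assigns one.
 */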
1731 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1732 							gfp_t priority)
1733 {
1734 	struct inet_connection_sock *icsk = inet_csk(sk);
1735 	struct mptcp_subflow_context *ctx;
1736 
1737 	ctx = kzalloc(sizeof(*ctx), priority);
1738 	if (!ctx)
1739 		return NULL;
1740 
1741 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1742 	INIT_LIST_HEAD(&ctx->node);
1743 	INIT_LIST_HEAD(&ctx->delegated_node);
1744 
1745 	pr_debug("subflow=%p", ctx);
1746 
1747 	ctx->tcp_sock = sk;
1748 	WRITE_ONCE(ctx->local_id, -1);
1749 
1750 	return ctx;
1751 }
1752 
1753 static void __subflow_state_change(struct sock *sk)
1754 {
1755 	struct socket_wq *wq;
1756 
1757 	rcu_read_lock();
1758 	wq = rcu_dereference(sk->sk_wq);
1759 	if (skwq_has_sleeper(wq))
1760 		wake_up_interruptible_all(&wq->wait);
1761 	rcu_read_unlock();
1762 }
1763 
1764 static bool subflow_is_done(const struct sock *sk)
1765 {
1766 	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1767 }
1768 
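/* sk_state_change() callback used by MPTCP subflows: handle the simultaneous
 * connect fallback, propagate data availability and errors to the msk, and
 * schedule the msk worker when the subflow is closing.
 */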
1769 static void subflow_state_change(struct sock *sk)
1770 {
1771 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1772 	struct sock *parent = subflow->conn;
1773 	struct mptcp_sock *msk;
1774 
1775 	__subflow_state_change(sk);
1776 
1777 	msk = mptcp_sk(parent);
1778 	if (subflow_simultaneous_connect(sk)) {
1779 		mptcp_do_fallback(sk);
1780 		pr_fallback(msk);
1781 		subflow->conn_finished = 1;
1782 		mptcp_propagate_state(parent, sk, subflow, NULL);
1783 	}
1784 
1785 	/* as recvmsg() does not acquire the subflow socket for ssk selection,
1786 	 * a FIN packet carrying a DSS can go unnoticed if we don't trigger
1787 	 * the data available machinery here.
1788 	 */
1789 	if (mptcp_subflow_data_available(sk))
1790 		mptcp_data_ready(parent, sk);
1791 	else if (unlikely(sk->sk_err))
1792 		subflow_error_report(sk);
1793 
1794 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1795 
1796 	/* when the fallback subflow closes the rx side, trigger a 'dummy'
1797 	 * ingress data fin, so that the msk state will follow along
1798 	 */
1799 	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1800 	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1801 		mptcp_schedule_work(parent);
1802 }
1803 
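/* Force-close the unaccepted MPTCP connections still sitting in the accept
 * queue of the listener subflow @listener_ssk; called with both the msk
 * (@listener_sk) and the listener subflow socket locks held.
 */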
1804 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
1805 {
1806 	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
1807 	struct request_sock *req, *head, *tail;
1808 	struct mptcp_subflow_context *subflow;
1809 	struct sock *sk, *ssk;
1810 
1811 	/* Due to lock dependencies, no relevant lock can be acquired under rskq_lock.
1812 	 * Splice the req list, so that accept() cannot reach the pending ssk after
1813 	 * the listener socket is released below.
1814 	 */
1815 	spin_lock_bh(&queue->rskq_lock);
1816 	head = queue->rskq_accept_head;
1817 	tail = queue->rskq_accept_tail;
1818 	queue->rskq_accept_head = NULL;
1819 	queue->rskq_accept_tail = NULL;
1820 	spin_unlock_bh(&queue->rskq_lock);
1821 
1822 	if (!head)
1823 		return;
1824 
1825 	/* can't acquire the msk socket lock under the subflow one,
1826 	 * or it would cause an ABBA deadlock
1827 	 */
1828 	release_sock(listener_ssk);
1829 
1830 	for (req = head; req; req = req->dl_next) {
1831 		ssk = req->sk;
1832 		if (!sk_is_mptcp(ssk))
1833 			continue;
1834 
1835 		subflow = mptcp_subflow_ctx(ssk);
1836 		if (!subflow || !subflow->conn)
1837 			continue;
1838 
1839 		sk = subflow->conn;
1840 		sock_hold(sk);
1841 
1842 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1843 		__mptcp_unaccepted_force_close(sk);
1844 		release_sock(sk);
1845 
1846 		/* lockdep will report a false positive ABBA deadlock
1847 		 * between cancel_work_sync and the listener socket.
1848 		 * The involved locks belong to different sockets WRT
1849 		 * the existing AB chain.
1850 		 * Using a per-socket key is problematic as key
1851 		 * deregistration requires process context, while it
1852 		 * must be performed at socket disposal time, in
1853 		 * atomic context.
1854 		 * Just tell lockdep to consider the listener socket
1855 		 * released here.
1856 		 */
1857 		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
1858 		mptcp_cancel_work(sk);
1859 		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1860 
1861 		sock_put(sk);
1862 	}
1863 
1864 	/* we are still under the listener msk socket lock */
1865 	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
1866 
1867 	/* restore the listener queue, to let the TCP code clean it up */
1868 	spin_lock_bh(&queue->rskq_lock);
1869 	WARN_ON_ONCE(queue->rskq_accept_head);
1870 	queue->rskq_accept_head = head;
1871 	queue->rskq_accept_tail = tail;
1872 	spin_unlock_bh(&queue->rskq_lock);
1873 }
1874 
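/* ULP init hook: attach a subflow context to the kernel TCP socket and divert
 * its af_ops and sk_* callbacks to the MPTCP ones.
 */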
1875 static int subflow_ulp_init(struct sock *sk)
1876 {
1877 	struct inet_connection_sock *icsk = inet_csk(sk);
1878 	struct mptcp_subflow_context *ctx;
1879 	struct tcp_sock *tp = tcp_sk(sk);
1880 	int err = 0;
1881 
1882 	/* disallow attaching ULP to a socket unless it has been
1883 	 * created with sock_create_kern()
1884 	 */
1885 	if (!sk->sk_kern_sock) {
1886 		err = -EOPNOTSUPP;
1887 		goto out;
1888 	}
1889 
1890 	ctx = subflow_create_ctx(sk, GFP_KERNEL);
1891 	if (!ctx) {
1892 		err = -ENOMEM;
1893 		goto out;
1894 	}
1895 
1896 	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1897 
1898 	tp->is_mptcp = 1;
1899 	ctx->icsk_af_ops = icsk->icsk_af_ops;
1900 	icsk->icsk_af_ops = subflow_default_af_ops(sk);
1901 	ctx->tcp_state_change = sk->sk_state_change;
1902 	ctx->tcp_error_report = sk->sk_error_report;
1903 
1904 	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
1905 	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
1906 
1907 	sk->sk_data_ready = subflow_data_ready;
1908 	sk->sk_write_space = subflow_write_space;
1909 	sk->sk_state_change = subflow_state_change;
1910 	sk->sk_error_report = subflow_error_report;
1911 out:
1912 	return err;
1913 }
1914 
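/* ULP release hook: drop the msk reference and free the subflow context,
 * unless the latter is still needed by __mptcp_close_ssk() for an unaccepted
 * subflow of an orphaned msk.
 */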
1915 static void subflow_ulp_release(struct sock *ssk)
1916 {
1917 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1918 	bool release = true;
1919 	struct sock *sk;
1920 
1921 	if (!ctx)
1922 		return;
1923 
1924 	sk = ctx->conn;
1925 	if (sk) {
1926 		/* if the msk has been orphaned, keep the ctx
1927 		 * alive; it will be freed by __mptcp_close_ssk()
1928 		 * when the subflow is still unaccepted
1929 		 */
1930 		release = ctx->disposable || list_empty(&ctx->node);
1931 
1932 		/* inet_child_forget() does not call sk_state_change(),
1933 		 * so explicitly trigger the socket close machinery
1934 		 */
1935 		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
1936 						  &mptcp_sk(sk)->flags))
1937 			mptcp_schedule_work(sk);
1938 		sock_put(sk);
1939 	}
1940 
1941 	mptcp_subflow_ops_undo_override(ssk);
1942 	if (release)
1943 		kfree_rcu(ctx, rcu);
1944 }
1945 
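/* ULP clone hook, invoked when a request socket is promoted to a full socket:
 * move the MPTCP state negotiated at SYN time from the request sock into a
 * newly allocated subflow context, or fall back to plain TCP.
 */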
1946 static void subflow_ulp_clone(const struct request_sock *req,
1947 			      struct sock *newsk,
1948 			      const gfp_t priority)
1949 {
1950 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1951 	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1952 	struct mptcp_subflow_context *new_ctx;
1953 
1954 	if (!tcp_rsk(req)->is_mptcp ||
1955 	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1956 		subflow_ulp_fallback(newsk, old_ctx);
1957 		return;
1958 	}
1959 
1960 	new_ctx = subflow_create_ctx(newsk, priority);
1961 	if (!new_ctx) {
1962 		subflow_ulp_fallback(newsk, old_ctx);
1963 		return;
1964 	}
1965 
1966 	new_ctx->conn_finished = 1;
1967 	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1968 	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1969 	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
1970 	new_ctx->rel_write_seq = 1;
1971 	new_ctx->tcp_sock = newsk;
1972 
1973 	if (subflow_req->mp_capable) {
1974 		/* see comments in subflow_syn_recv_sock(), MPTCP connection
1975 		 * is fully established only after we receive the remote key
1976 		 */
1977 		new_ctx->mp_capable = 1;
1978 		new_ctx->local_key = subflow_req->local_key;
1979 		new_ctx->token = subflow_req->token;
1980 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1981 		new_ctx->idsn = subflow_req->idsn;
1982 
1983 		/* this is the first subflow, id is always 0 */
1984 		subflow_set_local_id(new_ctx, 0);
1985 	} else if (subflow_req->mp_join) {
1986 		new_ctx->ssn_offset = subflow_req->ssn_offset;
1987 		new_ctx->mp_join = 1;
1988 		new_ctx->fully_established = 1;
1989 		new_ctx->remote_key_valid = 1;
1990 		new_ctx->backup = subflow_req->backup;
1991 		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
1992 		new_ctx->token = subflow_req->token;
1993 		new_ctx->thmac = subflow_req->thmac;
1994 
1995 		/* the subflow req id is valid, fetched via subflow_check_req()
1996 		 * and subflow_token_join_request()
1997 		 */
1998 		subflow_set_local_id(new_ctx, subflow_req->local_id);
1999 	}
2000 }
2001 
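/* release_cb override installed on MPTCP subflows: run the pending delegated
 * actions, if any, before the plain TCP release callback.
 */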
2002 static void tcp_release_cb_override(struct sock *ssk)
2003 {
2004 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
2005 	long status;
2006 
2007 	/* process and clear all the pending actions, but leave the subflow in
2008 	 * the napi queue. To respect locking, only the same CPU that originated
2009 	 * the action can touch the list. mptcp_napi_poll will take care of it.
2010 	 */
2011 	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
2012 	if (status)
2013 		mptcp_subflow_process_delegated(ssk, status);
2014 
2015 	tcp_release_cb(ssk);
2016 }
2017 
2018 static int tcp_abort_override(struct sock *ssk, int err)
2019 {
2020 	/* closing a listener subflow requires a great deal of care;
2021 	 * keep it simple and just prevent such an operation
2022 	 */
2023 	if (inet_sk_state_load(ssk) == TCP_LISTEN)
2024 		return -EINVAL;
2025 
2026 	return tcp_abort(ssk, err);
2027 }
2028 
2029 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
2030 	.name		= "mptcp",
2031 	.owner		= THIS_MODULE,
2032 	.init		= subflow_ulp_init,
2033 	.release	= subflow_ulp_release,
2034 	.clone		= subflow_ulp_clone,
2035 };
2036 
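/* create the dedicated kmem cache for the subflow request socks, sized for
 * the larger struct mptcp_subflow_request_sock
 */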
2037 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
2038 {
2039 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
2040 
2041 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
2042 					      subflow_ops->obj_size, 0,
2043 					      SLAB_ACCOUNT |
2044 					      SLAB_TYPESAFE_BY_RCU,
2045 					      NULL);
2046 	if (!subflow_ops->slab)
2047 		return -ENOMEM;
2048 
2049 	return 0;
2050 }
2051 
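/* Boot-time initialization: clone the TCP request sock ops and the af-specific
 * ops into their MPTCP variants, set up the protocol overrides and register
 * the "mptcp" ULP.
 */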
2052 void __init mptcp_subflow_init(void)
2053 {
2054 	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
2055 	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
2056 	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
2057 
2058 	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
2059 		panic("MPTCP: failed to init subflow v4 request sock ops\n");
2060 
2061 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
2062 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
2063 	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
2064 
2065 	subflow_specific = ipv4_specific;
2066 	subflow_specific.conn_request = subflow_v4_conn_request;
2067 	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
2068 	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
2069 	subflow_specific.rebuild_header = subflow_rebuild_header;
2070 
2071 	tcp_prot_override = tcp_prot;
2072 	tcp_prot_override.release_cb = tcp_release_cb_override;
2073 	tcp_prot_override.diag_destroy = tcp_abort_override;
2074 
2075 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2076 	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
2077 	 * structures for v4 and v6 have the same size. It should not changed in
2078 	 * the future but better to make sure to be warned if it is no longer
2079 	 * the case.
2080 	 */
2081 	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
2082 
2083 	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
2084 	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
2085 	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
2086 
2087 	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
2088 		panic("MPTCP: failed to init subflow v6 request sock ops\n");
2089 
2090 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
2091 	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
2092 	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
2093 
2094 	subflow_v6_specific = ipv6_specific;
2095 	subflow_v6_specific.conn_request = subflow_v6_conn_request;
2096 	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
2097 	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
2098 	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
2099 
2100 	subflow_v6m_specific = subflow_v6_specific;
2101 	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
2102 	subflow_v6m_specific.send_check = ipv4_specific.send_check;
2103 	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
2104 	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
2105 	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
2106 
2107 	tcpv6_prot_override = tcpv6_prot;
2108 	tcpv6_prot_override.release_cb = tcp_release_cb_override;
2109 	tcpv6_prot_override.diag_destroy = tcp_abort_override;
2110 #endif
2111 
2112 	mptcp_diag_subflow_init(&subflow_ulp_ops);
2113 
2114 	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
2115 		panic("MPTCP: failed to register subflows to ULP\n");
2116 }
2117