// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>

#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
                                  enum linux_mptcp_mib_field field)
{
        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        pr_debug("subflow_req=%p\n", subflow_req);

        if (subflow_req->msk)
                sock_put((struct sock *)subflow_req->msk);

        mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
                                  void *hmac)
{
        u8 msg[8];

        put_unaligned_be32(nonce1, &msg[0]);
        put_unaligned_be32(nonce2, &msg[4]);

        mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

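/* Illustrative sketch for the helper above, not part of the build:
 * mptcp_crypto_hmac_sha() concatenates the two 64-bit keys (big-endian)
 * into the HMAC key, while the message is the two nonces laid out above.
 * Conceptually:
 *
 *      key  = be64(key1) || be64(key2)         16 bytes
 *      msg  = be32(nonce1) || be32(nonce2)      8 bytes
 *      hmac = HMAC-SHA256(key, msg)            32 bytes
 */
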
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
        return mptcp_is_fully_established((void *)msk) &&
               ((mptcp_pm_is_userspace(msk) &&
                 mptcp_userspace_pm_active(msk)) ||
                READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
        struct mptcp_sock *msk = subflow_req->msk;
        u8 hmac[SHA256_DIGEST_SIZE];

        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

        subflow_generate_hmac(READ_ONCE(msk->local_key),
                              READ_ONCE(msk->remote_key),
                              subflow_req->local_nonce,
                              subflow_req->remote_nonce, hmac);

        subflow_req->thmac = get_unaligned_be64(hmac);
}

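/* Truncation sketch for the function above (illustrative only): the SYN-ACK
 * has room for just 64 bits of the 256-bit HMAC, so thmac keeps the leftmost
 * 8 bytes; the third ACK later carries the leftmost MPTCPOPT_HMAC_LEN bytes
 * instead (see subflow_finish_connect()):
 *
 *      thmac = get_unaligned_be64(hmac);               SYN-ACK: 64 bits
 *      memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); 3rd ACK: 160 bits
 */
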
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_sock *msk;
        int local_id;

        msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return NULL;
        }

        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
                return NULL;
        }
        subflow_req->local_id = local_id;
        subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);

        return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
        subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
        subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
        subflow_req->msk = NULL;
        mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
        return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
        struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

        if (mpext) {
                memset(mpext, 0, sizeof(*mpext));
                mpext->reset_reason = reason;
        }
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
                             const struct sock *sk_listener,
                             struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;
        bool opt_mp_capable, opt_mp_join;

        pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) {
                subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                return -EINVAL;
        }
#endif

        mptcp_get_options(skb, &mp_opt);

        opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
        opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
        if (opt_mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

                if (opt_mp_join)
                        return 0;
        } else if (opt_mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);

                if (mp_opt.backup)
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
        }

        if (opt_mp_capable && listener->request_mptcp) {
                int err, retries = MPTCP_TOKEN_MAX_RETRIES;

                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
                do {
                        get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
                } while (subflow_req->local_key == 0);

                if (unlikely(req->syncookie)) {
                        mptcp_crypto_key_sha(subflow_req->local_key,
                                             &subflow_req->token,
                                             &subflow_req->idsn);
                        if (mptcp_token_exists(subflow_req->token)) {
                                if (retries-- > 0)
                                        goto again;
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
                        } else {
                                subflow_req->mp_capable = 1;
                        }
                        return 0;
                }

                err = mptcp_token_new_request(req);
                if (err == 0)
                        subflow_req->mp_capable = 1;
                else if (retries-- > 0)
                        goto again;
                else
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

        } else if (opt_mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
                subflow_req->backup = mp_opt.backup;
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
                subflow_req->msk = subflow_token_join_request(req);

                /* Can't fall back to TCP in this case. */
                if (!subflow_req->msk) {
                        subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                        return -EPERM;
                }

                if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
                        pr_debug("syn inet_sport=%d %d\n",
                                 ntohs(inet_sk(sk_listener)->inet_sport),
                                 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
                        if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
                                subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
                                return -EPERM;
                        }
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
                }

                subflow_req_create_thmac(subflow_req);

                if (unlikely(req->syncookie)) {
                        if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
                                return -EPERM;
                        }

                        subflow_init_req_cookie_join_save(subflow_req, skb);
                }

                pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
                         subflow_req->remote_nonce, subflow_req->msk);
        }

        return 0;
}

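/* Quick reference for the SYN paths above (a summary, no new logic):
 *
 *      MPC + MPJ both set            -> ignore MPTCP, proceed as plain TCP
 *      MPC SYN, listener does MPTCP  -> new local key/token, mp_capable = 1
 *      MPJ SYN, listener does MPTCP  -> token lookup + thmac, mp_join = 1
 *      MPJ SYN, unknown token/no id  -> -EPERM, caller sends a TCP reset
 *      token creation keeps failing  -> account TOKENFALLBACKINIT, plain TCP
 */
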
int mptcp_subflow_init_cookie_req(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;
        bool opt_mp_capable, opt_mp_join;
        int err;

        subflow_init_req(req, sk_listener);
        mptcp_get_options(skb, &mp_opt);

        opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
        opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
        if (opt_mp_capable && opt_mp_join)
                return -EINVAL;

        if (opt_mp_capable && listener->request_mptcp) {
                if (mp_opt.sndr_key == 0)
                        return -EINVAL;

                subflow_req->local_key = mp_opt.rcvr_key;
                err = mptcp_token_new_request(req);
                if (err)
                        return err;

                subflow_req->mp_capable = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        } else if (opt_mp_join && listener->request_mptcp) {
                if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
                        return -EINVAL;

                subflow_req->mp_join = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb)
{
        const struct mptcp_ext *mpext = mptcp_get_ext(skb);

        if (!mpext)
                return SK_RST_REASON_NOT_SPECIFIED;

        return sk_rst_convert_mptcp_reason(mpext->reset_reason);
}

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
                                              struct request_sock *req,
                                              u32 tw_isn)
{
        struct dst_entry *dst;
        int err;

        tcp_rsk(req)->is_mptcp = 1;
        subflow_init_req(req, sk);

        dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn);
        if (!dst)
                return NULL;

        err = subflow_check_req(req, sk, skb);
        if (err == 0)
                return dst;

        dst_release(dst);
        if (!req->syncookie)
                tcp_request_sock_ops.send_reset(sk, skb,
                                                mptcp_get_rst_reason(skb));
        return NULL;
}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
                                enum tcp_synack_type synack_type)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct inet_request_sock *ireq = inet_rsk(req);

        /* clear tstamp_ok, as needed depending on cookie */
        if (foc && foc->len > -1)
                ireq->tstamp_ok = 0;

        if (synack_type == TCP_SYNACK_FASTOPEN)
                mptcp_fastopen_subflow_synack_set_params(subflow, req);
}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                                  struct flowi *fl,
                                  struct request_sock *req,
                                  struct tcp_fastopen_cookie *foc,
                                  enum tcp_synack_type synack_type,
                                  struct sk_buff *syn_skb)
{
        subflow_prep_synack(sk, req, foc, synack_type);

        return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
                                                     synack_type, syn_skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                                  struct flowi *fl,
                                  struct request_sock *req,
                                  struct tcp_fastopen_cookie *foc,
                                  enum tcp_synack_type synack_type,
                                  struct sk_buff *syn_skb)
{
        subflow_prep_synack(sk, req, foc, synack_type);

        return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
                                                     synack_type, syn_skb);
}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
                                              struct request_sock *req,
                                              u32 tw_isn)
{
        struct dst_entry *dst;
        int err;

        tcp_rsk(req)->is_mptcp = 1;
        subflow_init_req(req, sk);

        dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn);
        if (!dst)
                return NULL;

        err = subflow_check_req(req, sk, skb);
        if (err == 0)
                return dst;

        dst_release(dst);
        if (!req->syncookie)
                tcp6_request_sock_ops.send_reset(sk, skb,
                                                 mptcp_get_rst_reason(skb));
        return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
        u8 hmac[SHA256_DIGEST_SIZE];
        u64 thmac;

        subflow_generate_hmac(subflow->remote_key, subflow->local_key,
                              subflow->remote_nonce, subflow->local_nonce,
                              hmac);

        thmac = get_unaligned_be64(hmac);
        pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
                 subflow, subflow->token, thmac, subflow->thmac);

        return thmac == subflow->thmac;
}

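/* Note on the argument ordering in subflow_thmac_valid() above
 * (illustrative): the listener computed thmac in subflow_req_create_thmac()
 * with its own key/nonce first, so the initiator mirrors that computation
 * by passing the remote (== listener) values first:
 *
 *      listener:  HMAC(srv_key || clt_key, srv_nonce || clt_nonce)
 *      initiator: HMAC(remote_key || local_key, remote_nonce || local_nonce)
 *
 * Both expressions describe the same value, since remote == srv and
 * local == clt on the initiating side.
 */
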
void mptcp_subflow_reset(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;

        /* mptcp_mp_fail_no_response() can reach here on an already closed
         * socket
         */
        if (ssk->sk_state == TCP_CLOSE)
                return;

        /* must hold: tcp_done() could drop last reference on parent */
        sock_hold(sk);

        mptcp_send_active_reset_reason(ssk);
        tcp_done(ssk);
        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
                mptcp_schedule_work(sk);

        sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
        return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_sync_state(struct sock *sk, int state)
{
        struct mptcp_subflow_context *subflow;
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct sock *ssk = msk->first;

        subflow = mptcp_subflow_ctx(ssk);
        __mptcp_propagate_sndbuf(sk, ssk);
        if (!msk->rcvspace_init)
                mptcp_rcv_space_init(msk, ssk);

        if (sk->sk_state == TCP_SYN_SENT) {
                /* subflow->idsn is always available in TCP_SYN_SENT state,
                 * even for the FASTOPEN scenarios
                 */
                WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
                WRITE_ONCE(msk->snd_nxt, msk->write_seq);
                mptcp_set_state(sk, state);
                sk->sk_state_change(sk);
        }
}

static void subflow_set_remote_key(struct mptcp_sock *msk,
                                   struct mptcp_subflow_context *subflow,
                                   const struct mptcp_options_received *mp_opt)
{
        /* active MPC subflow will reach here multiple times:
         * at subflow_finish_connect() time and at 4th ack time
         */
        if (subflow->remote_key_valid)
                return;

        subflow->remote_key_valid = 1;
        subflow->remote_key = mp_opt->sndr_key;
        mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
        subflow->iasn++;

        WRITE_ONCE(msk->remote_key, subflow->remote_key);
        WRITE_ONCE(msk->ack_seq, subflow->iasn);
        WRITE_ONCE(msk->can_ack, true);
        atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}

static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
                                  struct mptcp_subflow_context *subflow,
                                  const struct mptcp_options_received *mp_opt)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        mptcp_data_lock(sk);
        if (mp_opt) {
                /* Options are available only in the non-fallback cases;
                 * avoid updating rx path fields otherwise
                 */
                WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
                WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
                subflow_set_remote_key(msk, subflow, mp_opt);
        }

        if (!sock_owned_by_user(sk)) {
                __mptcp_sync_state(sk, ssk->sk_state);
        } else {
                msk->pending_state = ssk->sk_state;
                __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
        }
        mptcp_data_unlock(sk);
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;
        struct mptcp_sock *msk;

        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

        /* be sure no special action on any packet other than syn-ack */
        if (subflow->conn_finished)
                return;

        msk = mptcp_sk(parent);
        subflow->rel_write_seq = 1;
        subflow->conn_finished = 1;
        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
        pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);

        mptcp_get_options(skb, &mp_opt);
        if (subflow->request_mptcp) {
                if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
                        MPTCP_INC_STATS(sock_net(sk),
                                        MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
                        mptcp_do_fallback(sk);
                        pr_fallback(msk);
                        goto fallback;
                }

                if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
                        WRITE_ONCE(msk->csum_enabled, true);
                if (mp_opt.deny_join_id0)
                        WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
                subflow->mp_capable = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
                mptcp_finish_connect(sk);
                mptcp_active_enable(parent);
                mptcp_propagate_state(parent, sk, subflow, &mp_opt);
        } else if (subflow->request_join) {
                u8 hmac[SHA256_DIGEST_SIZE];

                if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }

                subflow->backup = mp_opt.backup;
                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
                WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
                         subflow, subflow->thmac, subflow->remote_nonce,
                         subflow->backup);

                if (!subflow_thmac_valid(subflow)) {
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }

                if (!mptcp_finish_join(sk))
                        goto do_reset;

                subflow_generate_hmac(subflow->local_key, subflow->remote_key,
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
                                      hmac);
                memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

                subflow->mp_join = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

                if (subflow->backup)
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);

                if (subflow_use_different_dport(msk, sk)) {
                        pr_debug("synack inet_dport=%d %d\n",
                                 ntohs(inet_sk(sk)->inet_dport),
                                 ntohs(inet_sk(parent)->inet_dport));
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
                }
        } else if (mptcp_check_fallback(sk)) {
                /* It looks like MPTCP is blocked, while TCP is not */
                if (subflow->mpc_drop)
                        mptcp_active_disable(parent);
fallback:
                mptcp_propagate_state(parent, sk, subflow, NULL);
        }
        return;

do_reset:
        subflow->reset_transient = 0;
        mptcp_subflow_reset(sk);
}

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
        WARN_ON_ONCE(local_id < 0 || local_id > 255);
        WRITE_ONCE(subflow->local_id, local_id);
}

static int subflow_chk_local_id(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        int err;

        if (likely(subflow->local_id >= 0))
                return 0;

        err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
        if (err < 0)
                return err;

        subflow_set_local_id(subflow, err);
        subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);

        return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
        int err = subflow_chk_local_id(sk);

        if (unlikely(err < 0))
                return err;

        return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
        int err = subflow_chk_local_id(sk);

        if (unlikely(err < 0))
                return err;

        return inet6_sk_rebuild_header(sk);
}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p\n", subflow);

        /* Never answer to SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
                                &subflow_request_sock_ipv4_ops,
                                sk, skb);
drop:
        tcp_listendrop(sk);
        return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
        subflow_req_destructor(req);
        tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p\n", subflow);

        if (skb->protocol == htons(ETH_P_IP))
                return subflow_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
                __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
                return 0;
        }

        return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
        subflow_req_destructor(req);
        tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
                                               struct sock *sk_listener,
                                               bool attach_listener)
{
        if (ops->family == AF_INET)
                ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (ops->family == AF_INET6)
                ops = &mptcp_subflow_v6_request_sock_ops;
#endif

        return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
                               const struct mptcp_options_received *mp_opt)
{
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;

        subflow_req = mptcp_subflow_rsk(req);
        msk = subflow_req->msk;
        if (!msk)
                return false;

        subflow_generate_hmac(READ_ONCE(msk->remote_key),
                              READ_ONCE(msk->local_key),
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);

        return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

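/* crypto_memneq() above rather than memcmp() is deliberate: it compares all
 * MPTCPOPT_HMAC_LEN bytes in constant time, so a peer probing HMAC values
 * cannot learn a matching prefix from response timing. An equivalent
 * open-coded form (illustrative only) would be:
 *
 *      u8 diff = 0;
 *      int i;
 *
 *      for (i = 0; i < MPTCPOPT_HMAC_LEN; i++)
 *              diff |= hmac[i] ^ mp_opt->hmac[i];
 *      return diff == 0;
 */
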
static void subflow_ulp_fallback(struct sock *sk,
                                 struct mptcp_subflow_context *old_ctx)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        mptcp_subflow_tcp_fallback(sk, old_ctx);
        icsk->icsk_ulp_ops = NULL;
        rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
        tcp_sk(sk)->is_mptcp = 0;

        mptcp_subflow_ops_undo_override(sk);
}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
                return;

        list_del(&mptcp_subflow_ctx(ssk)->node);
        if (inet_csk(ssk)->icsk_ulp_ops) {
                subflow_ulp_fallback(ssk, ctx);
                if (ctx->conn)
                        sock_put(ctx->conn);
        }

        kfree_rcu(ctx, rcu);
}

void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
                                       struct mptcp_subflow_context *subflow,
                                       const struct mptcp_options_received *mp_opt)
{
        subflow_set_remote_key(msk, subflow, mp_opt);
        subflow->fully_established = 1;
        WRITE_ONCE(msk->fully_established, true);

        if (subflow->is_mptfo)
                __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst,
                                          struct request_sock *req_unhash,
                                          bool *own_req)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
        bool fallback, fallback_is_fatal;
        enum sk_rst_reason reason;
        struct mptcp_sock *owner;
        struct sock *child;

        pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);

        /* After child creation we must look for MPC even when options
         * are not parsed
         */
        mp_opt.suboptions = 0;

        /* hopefully temporary handling for MP_JOIN+syncookie */
        subflow_req = mptcp_subflow_rsk(req);
        fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
        fallback = !tcp_rsk(req)->is_mptcp;
        if (fallback)
                goto create_child;

        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
                /* we can receive and accept an in-window, out-of-order pkt,
                 * which may not carry the MP_CAPABLE opt even on mptcp enabled
                 * paths: always try to extract the peer key, and fallback
                 * for packets missing it.
                 * Even OoO DSS packets coming legitimately after dropped or
                 * reordered MPC will cause fallback, but we don't have other
                 * options.
                 */
                mptcp_get_options(skb, &mp_opt);
                if (!(mp_opt.suboptions &
                      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
                        fallback = true;

        } else if (subflow_req->mp_join) {
                mptcp_get_options(skb, &mp_opt);
                if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
                    !subflow_hmac_valid(req, &mp_opt) ||
                    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        fallback = true;
                }
        }

create_child:
        child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                     req_unhash, own_req);

        if (child && *own_req) {
                struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

                tcp_rsk(req)->drop_req = false;

                /* we need to fallback on ctx allocation failure and on pre-reqs
                 * checking above. In the latter scenario we additionally need
                 * to reset the context to non MPTCP status.
                 */
                if (!ctx || fallback) {
                        if (fallback_is_fatal) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                                goto dispose_child;
                        }
                        goto fallback;
                }

                /* ssk inherits options of listener sk */
                ctx->setsockopt_seq = listener->setsockopt_seq;

                if (ctx->mp_capable) {
                        ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
                        if (!ctx->conn)
                                goto fallback;

                        ctx->subflow_id = 1;
                        owner = mptcp_sk(ctx->conn);
                        mptcp_pm_new_connection(owner, child, 1);

                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
                        if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
                                mptcp_pm_fully_established(owner, child);
                                ctx->pm_notified = 1;
                        }
                } else if (ctx->mp_join) {
                        owner = subflow_req->msk;
                        if (!owner) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
                                goto dispose_child;
                        }

                        /* move the msk reference ownership to the subflow */
                        subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;

                        if (subflow_use_different_sport(owner, sk)) {
                                pr_debug("ack inet_sport=%d %d\n",
                                         ntohs(inet_sk(sk)->inet_sport),
                                         ntohs(inet_sk((struct sock *)owner)->inet_sport));
                                if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
                                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
                                        subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
                                        goto dispose_child;
                                }
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
                        }

                        if (!mptcp_finish_join(child)) {
                                struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child);

                                subflow_add_reset_reason(skb, subflow->reset_reason);
                                goto dispose_child;
                        }

                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                        tcp_rsk(req)->drop_req = true;
                }
        }

        /* check for expected invariant - should never trigger, just helps
         * catching earlier subtle bugs
         */
        WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;

dispose_child:
        mptcp_subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);
        reason = mptcp_get_rst_reason(skb);
        req->rsk_ops->send_reset(sk, skb, reason);

        /* The last child reference will be released by the caller */
        return child;

fallback:
        if (fallback)
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
        mptcp_subflow_drop_ctx(child);
        return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {
        MAPPING_OK,             /* the current mapping is usable */
        MAPPING_INVALID,        /* no consistent mapping can be used */
        MAPPING_EMPTY,          /* no data (or not enough) to validate yet */
        MAPPING_DATA_FIN,       /* a DATA_FIN with no payload was found */
        MAPPING_DUMMY,          /* the subflow fell back to plain TCP */
        MAPPING_BAD_CSUM        /* DSS checksum validation failed */
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
        pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
                 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
                return true;

        return skb->len - skb_consumed <= subflow->map_data_len -
                                          mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        if (unlikely(before(ssn, subflow->map_subflow_seq))) {
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
                dbg_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping covers only past subflow data, invalid */
                dbg_bad_map(subflow, ssn);
                return false;
        }
        return true;
}

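/* Worked example for the checks above (numbers invented): with
 * map_subflow_seq = 1000 and map_data_len = 500 the mapping covers subflow
 * sequence space [1000, 1500). Then:
 *
 *      ssn =  900: before(900, 1000)         -> mapping starts later, bad
 *      ssn = 1200: inside [1000, 1500)       -> ok
 *      ssn = 1500: !before(1500, 1000 + 500) -> mapping fully in the past, bad
 */
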
static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
                                              bool csum_reqd)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 offset, seq, delta;
        __sum16 csum;
        int len;

        if (!csum_reqd)
                return MAPPING_OK;

        /* mapping already validated on previous traversal */
        if (subflow->map_csum_len == subflow->map_data_len)
                return MAPPING_OK;

        /* traverse the receive queue, ensuring it contains a full
         * DSS mapping and accumulating the related csum.
         * Preserve the accumulated csum across multiple calls, to compute
         * the csum only once
         */
        delta = subflow->map_data_len - subflow->map_csum_len;
        for (;;) {
                seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
                offset = seq - TCP_SKB_CB(skb)->seq;

                /* if the current skb has not been accounted yet, csum its contents
                 * up to the amount covered by the current DSS
                 */
                if (offset < skb->len) {
                        __wsum csum;

                        len = min(skb->len - offset, delta);
                        csum = skb_checksum(skb, offset, len, 0);
                        subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
                                                                subflow->map_csum_len);

                        delta -= len;
                        subflow->map_csum_len += len;
                }
                if (delta == 0)
                        break;

                if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
                        /* if this subflow is closed, the partial mapping
                         * will never be completed; flush the pending skbs, so
                         * that subflow_sched_work_if_closed() can kick in
                         */
                        if (unlikely(ssk->sk_state == TCP_CLOSE))
                                while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                        sk_eat_skb(ssk, skb);

                        /* not enough data to validate the csum */
                        return MAPPING_EMPTY;
                }

                /* the DSS mapping for next skbs will be validated later,
                 * when a get_mapping_status call will process such skb
                 */
                skb = skb->next;
        }

        /* note that 'map_data_len' accounts only for the carried data and does
         * not include the eventual seq increment due to the data fin,
         * while the pseudo header requires the original DSS data len,
         * including that
         */
        csum = __mptcp_make_csum(subflow->map_seq,
                                 subflow->map_subflow_seq,
                                 subflow->map_data_len + subflow->map_data_fin,
                                 subflow->map_data_csum);
        if (unlikely(csum)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
                return MAPPING_BAD_CSUM;
        }

        subflow->valid_csum_seen = 1;
        return MAPPING_OK;
}

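/* Accumulation sketch for validate_data_csum() above (illustrative,
 * made-up sizes): a 2000-byte mapping split across two skbs is folded
 * incrementally, then checked once against the DSS pseudo-header, so the
 * payload is traversed only one time:
 *
 *      sum = skb_checksum(skb1, off, 1500, 0);        map_csum_len -> 1500
 *      sum = csum_block_add(sum,
 *                           skb_checksum(skb2, 0, 500, 0), 1500);
 *      ok  = !__mptcp_make_csum(map_seq, map_subflow_seq,
 *                               map_data_len + map_data_fin, sum);
 */
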
static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool csum_reqd = READ_ONCE(msk->csum_enabled);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
        u64 map_seq;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
                return MAPPING_EMPTY;

        if (mptcp_check_fallback(ssk))
                return MAPPING_DUMMY;

        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
                if (!subflow->map_valid && !skb->len) {
                        /* the TCP stack delivers 0-len FIN pkts to the receive
                         * queue, those are the only 0-len pkts ever expected
                         * here, and we can admit no mapping only for 0-len pkts
                         */
                        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                                WARN_ONCE(1, "0len seq %d:%d flags %x",
                                          TCP_SKB_CB(skb)->seq,
                                          TCP_SKB_CB(skb)->end_seq,
                                          TCP_SKB_CB(skb)->tcp_flags);
                        sk_eat_skb(ssk, skb);
                        return MAPPING_EMPTY;
                }

                if (!subflow->map_valid)
                        return MAPPING_INVALID;

                goto validate_seq;
        }

        trace_get_mapping_status(mpext);

        data_len = mpext->data_len;
        if (data_len == 0) {
                pr_debug("infinite mapping received\n");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                subflow->map_data_len = 0;
                return MAPPING_INVALID;
        }

        if (mpext->data_fin == 1) {
                u64 data_fin_seq;

                if (data_len == 1) {
                        bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
                                                                 mpext->dsn64);
                        pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
                        if (subflow->map_valid) {
                                /* A DATA_FIN might arrive in a DSS
                                 * option before the previous mapping
                                 * has been fully consumed. Continue
                                 * handling the existing mapping.
                                 */
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        }

                        if (updated)
                                mptcp_schedule_work((struct sock *)msk);

                        return MAPPING_DATA_FIN;
                }

                data_fin_seq = mpext->data_seq + data_len - 1;

                /* If mpext->data_seq is a 32-bit value, data_fin_seq must also
                 * be limited to 32 bits.
                 */
                if (!mpext->dsn64)
                        data_fin_seq &= GENMASK_ULL(31, 0);

                mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
                pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
                         data_fin_seq, mpext->dsn64);

                /* Adjust for DATA_FIN using 1 byte of sequence space */
                data_len--;
        }

        map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
        WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

        if (subflow->map_valid) {
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
                    subflow->map_data_len == data_len &&
                    subflow->map_csum_reqd == mpext->csum_reqd) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
                        goto validate_csum;
                }

                /* If this skb's data is fully covered by the current mapping,
                 * the new map would need caching, which is not supported
                 */
                if (skb_is_fully_mapped(ssk, skb)) {
                        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                        return MAPPING_INVALID;
                }

                /* will validate the next map after consuming the current one */
                goto validate_csum;
        }

        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
        subflow->map_data_fin = mpext->data_fin;
        subflow->mpc_map = mpext->mpc_map;
        subflow->map_csum_reqd = mpext->csum_reqd;
        subflow->map_csum_len = 0;
        subflow->map_data_csum = csum_unfold(mpext->csum);

        /* Cf. RFC 8684 Section 3.3.0 */
        if (unlikely(subflow->map_csum_reqd != csum_reqd))
                return MAPPING_INVALID;

        pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
                 subflow->map_seq, subflow->map_subflow_seq,
                 subflow->map_data_len, subflow->map_csum_reqd,
                 subflow->map_data_csum);

validate_seq:
        /* we revalidate valid mapping on new skb, because we must ensure
         * the current skb is completely covered by the available mapping
         */
        if (!validate_mapping(ssk, skb)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
                return MAPPING_INVALID;
        }

        skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
        return validate_data_csum(ssk, skb, csum_reqd);
}

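/* End-to-end mapping example for the function above (values invented): a
 * DSS option carrying data_seq=10000, subflow_seq=1, data_len=1460 and
 * data_fin=0 announces that subflow bytes [ssn_offset + 1, ssn_offset + 1461)
 * transport MPTCP data bytes [10000, 11460). get_mapping_status() caches
 * that window in the subflow->map_* fields and subsequent skbs are only
 * validated against it; a DATA_FIN consumes one extra unit of data sequence
 * space beyond data_len.
 */
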
static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
                                       u64 limit)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
        struct tcp_sock *tp = tcp_sk(ssk);
        u32 offset, incr, avail_len;

        offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(offset > skb->len))
                goto out;

        avail_len = skb->len - offset;
        incr = limit >= avail_len ? avail_len + fin : limit;

        pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
                 offset, subflow->map_subflow_seq);
        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
        tcp_sk(ssk)->copied_seq += incr;

out:
        if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
                sk_eat_skb(ssk, skb);
        if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
                subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
        struct sock *sk = (struct sock *)msk;

        if (likely(ssk->sk_state != TCP_CLOSE &&
                   (ssk->sk_state != TCP_CLOSE_WAIT ||
                    inet_sk_state_load(sk) != TCP_ESTABLISHED)))
                return;

        if (skb_queue_empty(&ssk->sk_receive_queue) &&
            !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
                mptcp_schedule_work(sk);
}

static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);

        if (subflow->mp_join)
                return false;
        else if (READ_ONCE(msk->csum_enabled))
                return !subflow->valid_csum_seen;
        else
                return !subflow->fully_established;
}

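/* Fallback policy sketch for the helper above: an MP_JOIN subflow can never
 * fall back (the join would be lost); with checksums enabled, fallback is
 * allowed only until the first valid csum has been seen; otherwise it is
 * allowed only while the subflow is not yet fully established.
 */
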
static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned long fail_tout;

        /* graceful failure can happen only on the MPC subflow */
        if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
                return;

        /* since the close timeout takes precedence over the fail one,
         * no need to start the latter when the first is already set
         */
        if (sock_flag((struct sock *)msk, SOCK_DEAD))
                return;

        /* we don't need extreme accuracy here, use a zero fail_tout as special
         * value meaning no fail timeout at all;
         */
        fail_tout = jiffies + TCP_RTO_MAX;
        if (!fail_tout)
                fail_tout = 1;
        WRITE_ONCE(subflow->fail_tout, fail_tout);
        tcp_send_ack(ssk);

        mptcp_reset_tout_timer(msk, subflow->fail_tout);
}

static bool subflow_check_data_avail(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        enum mapping_status status;
        struct mptcp_sock *msk;
        struct sk_buff *skb;

        if (!skb_peek(&ssk->sk_receive_queue))
                WRITE_ONCE(subflow->data_avail, false);
        if (subflow->data_avail)
                return true;

        msk = mptcp_sk(subflow->conn);
        for (;;) {
                u64 ack_seq;
                u64 old_ack;

                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
                if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
                             status == MAPPING_BAD_CSUM))
                        goto fallback;

                if (status != MAPPING_OK)
                        goto no_data;

                skb = skb_peek(&ssk->sk_receive_queue);
                if (WARN_ON_ONCE(!skb))
                        goto no_data;

                if (unlikely(!READ_ONCE(msk->can_ack)))
                        goto fallback;

                old_ack = READ_ONCE(msk->ack_seq);
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
                         ack_seq);
                if (unlikely(before64(ack_seq, old_ack))) {
                        mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
                        continue;
                }

                WRITE_ONCE(subflow->data_avail, true);
                break;
        }
        return true;

no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;

fallback:
        if (!__mptcp_check_fallback(msk)) {
                /* RFC 8684 section 3.7. */
                if (status == MAPPING_BAD_CSUM &&
                    (subflow->mp_join || subflow->valid_csum_seen)) {
                        subflow->send_mp_fail = 1;

                        if (!READ_ONCE(msk->allow_infinite_fallback)) {
                                subflow->reset_transient = 0;
                                subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
                                goto reset;
                        }
                        mptcp_subflow_fail(msk, ssk);
                        WRITE_ONCE(subflow->data_avail, true);
                        return true;
                }

                if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
                        /* fatal protocol error, close the socket.
                         * subflow_error_report() will introduce the appropriate barriers
                         */
                        subflow->reset_transient = 0;
                        subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
                        WRITE_ONCE(ssk->sk_err, EBADMSG);
                        tcp_set_state(ssk, TCP_CLOSE);
                        while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                sk_eat_skb(ssk, skb);
                        mptcp_send_active_reset_reason(ssk);
                        WRITE_ONCE(subflow->data_avail, false);
                        return false;
                }

                mptcp_do_fallback(ssk);
        }

        skb = skb_peek(&ssk->sk_receive_queue);
        subflow->map_valid = 1;
        subflow->map_seq = READ_ONCE(msk->ack_seq);
        subflow->map_data_len = skb->len;
        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
        WRITE_ONCE(subflow->data_avail, true);
        return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        /* check if current mapping is still valid */
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
                WRITE_ONCE(subflow->data_avail, false);

                pr_debug("Done with mapping: seq=%u data_len=%u\n",
                         subflow->map_subflow_seq,
                         subflow->map_data_len);
        }

        return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored;
 * as far as the mptcp peer is concerned, that data is still in flight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;

        *space = __mptcp_space(sk);
        *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static void subflow_error_report(struct sock *ssk)
{
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

        /* bail early if this is a no-op, so that we avoid introducing a
         * problematic lockdep dependency between TCP accept queue lock
         * and msk socket spinlock
         */
        if (!sk->sk_socket)
                return;

        mptcp_data_lock(sk);
        if (!sock_owned_by_user(sk))
                __mptcp_error_report(sk);
        else
                __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
        mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        u16 state = 1 << inet_sk_state_load(sk);
        struct sock *parent = subflow->conn;
        struct mptcp_sock *msk;

        trace_sk_data_ready(sk);

        msk = mptcp_sk(parent);
        if (state & TCPF_LISTEN) {
                /* MPJ subflows are removed from the accept queue before
                 * reaching here, avoid stray wakeups
                 */
                if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
                        return;

                parent->sk_data_ready(parent);
                return;
        }

        WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
                     !subflow->mp_join && !(state & TCPF_CLOSE));

        if (mptcp_subflow_data_available(sk)) {
                mptcp_data_ready(parent, sk);

                /* subflow-level lowat tests are not relevant;
                 * respect the msk-level threshold, eventually mandating an
                 * immediate ack
                 */
                if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
                    (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
                        inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
        } else if (unlikely(sk->sk_err)) {
                subflow_error_report(sk);
        }
}

static void subflow_write_space(struct sock *ssk)
{
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

        mptcp_propagate_sndbuf(sk, ssk);
        mptcp_write_space(sk);
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (sk->sk_family == AF_INET6)
                return &subflow_v6_specific;
#endif
        return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_connection_sock_af_ops *target;

        target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

        pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
                 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

        if (likely(icsk->icsk_af_ops == target))
                return;

        subflow->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = target;
}
#endif

void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
                         struct sockaddr_storage *addr,
                         unsigned short family)
{
        memset(addr, 0, sizeof(*addr));
        addr->ss_family = family;
        if (addr->ss_family == AF_INET) {
                struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

                if (info->family == AF_INET)
                        in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
                else if (ipv6_addr_v4mapped(&info->addr6))
                        in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
                in_addr->sin_port = info->port;
        }
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->ss_family == AF_INET6) {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

                if (info->family == AF_INET)
                        ipv6_addr_set_v4mapped(info->addr.s_addr,
                                               &in6_addr->sin6_addr);
                else
                        in6_addr->sin6_addr = info->addr6;
                in6_addr->sin6_port = info->port;
        }
#endif
}

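/* Usage sketch for the helper above (illustrative, mirrors what
 * __mptcp_subflow_connect() below actually does): build the bind address
 * for an additional v4 subflow:
 *
 *      struct sockaddr_storage addr;
 *
 *      mptcp_info2sockaddr(&local->addr, &addr, ssk->sk_family);
 *      err = kernel_bind(sf, (struct sockaddr *)&addr,
 *                        sizeof(struct sockaddr_in));
 */
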
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local,
                            const struct mptcp_addr_info *remote)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_subflow_context *subflow;
        int local_id = local->addr.id;
        struct sockaddr_storage addr;
        int remote_id = remote->id;
        int err = -ENOTCONN;
        struct socket *sf;
        struct sock *ssk;
        u32 remote_token;
        int addrlen;

        /* The userspace PM sent the request too early? */
        if (!mptcp_is_fully_established(sk))
                goto err_out;

        err = mptcp_subflow_create_socket(sk, local->addr.family, &sf);
        if (err) {
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCREATSKERR);
                pr_debug("msk=%p local=%d remote=%d create sock error: %d\n",
                         msk, local_id, remote_id, err);
                goto err_out;
        }

        ssk = sf->sk;
        subflow = mptcp_subflow_ctx(ssk);
        do {
                get_random_bytes(&subflow->local_nonce, sizeof(u32));
        } while (!subflow->local_nonce);

        /* if 'IPADDRANY', the ID will be set later, after the routing */
        if (local->addr.family == AF_INET) {
                if (!local->addr.addr.s_addr)
                        local_id = -1;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        } else if (sk->sk_family == AF_INET6) {
                if (ipv6_addr_any(&local->addr.addr6))
                        local_id = -1;
#endif
        }

        if (local_id >= 0)
                subflow_set_local_id(subflow, local_id);

        subflow->remote_key_valid = 1;
        subflow->remote_key = READ_ONCE(msk->remote_key);
        subflow->local_key = READ_ONCE(msk->local_key);
        subflow->token = msk->token;
        mptcp_info2sockaddr(&local->addr, &addr, ssk->sk_family);

        addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (addr.ss_family == AF_INET6)
                addrlen = sizeof(struct sockaddr_in6);
#endif
        ssk->sk_bound_dev_if = local->ifindex;
        err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
        if (err) {
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXBINDERR);
                pr_debug("msk=%p local=%d remote=%d bind error: %d\n",
                         msk, local_id, remote_id, err);
                goto failed;
        }

        mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
        pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
                 remote_token, local_id, remote_id);
        subflow->remote_token = remote_token;
        WRITE_ONCE(subflow->remote_id, remote_id);
        subflow->request_join = 1;
        subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
        subflow->subflow_id = msk->subflow_id++;
        mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

        sock_hold(ssk);
        list_add_tail(&subflow->node, &msk->conn_list);
        err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
        if (err && err != -EINPROGRESS) {
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCONNECTERR);
                pr_debug("msk=%p local=%d remote=%d connect error: %d\n",
                         msk, local_id, remote_id, err);
                goto failed_unlink;
        }

        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTX);

        /* discard the subflow socket */
        mptcp_sock_graft(ssk, sk->sk_socket);
        iput(SOCK_INODE(sf));
        WRITE_ONCE(msk->allow_infinite_fallback, false);
        mptcp_stop_tout_timer(sk);
        return 0;

failed_unlink:
        list_del(&subflow->node);
        sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
        subflow->disposable = 1;
        sock_release(sf);

err_out:
        /* we account subflows before the creation, and these failures will not
         * be caught by sk_state_change()
         */
        mptcp_pm_close_subflow(msk);
        return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

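/* Point the subflow at a patched copy of the TCP proto ops, so that
 * release_cb and diag_destroy can be interposed; see
 * tcp_release_cb_override() and tcp_abort_override() below.
 */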
static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

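/* Allocate a kernel TCP socket, attach the "mptcp" ULP to it and tie it to
 * the given MPTCP socket, so that it can later be used as a subflow.
 */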
int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here: on bad configuration,
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
	if (err)
		return err;

	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);

	err = security_mptcp_add_subflow(sk, sf->sk);
	if (err)
		goto err_free;

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire the net ref, but the TCP
	 * timer needs it.
	 * Update ns_tracker to current stack trace and refcounted tracker.
	 */
	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");
	if (err)
		goto err_free;

	mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
	release_sock(sf->sk);

	/* the newly created socket really belongs to the owning MPTCP
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p\n", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;

err_free:
	release_sock(sf->sk);
	sock_release(sf);
	return err;
}

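/* Allocate the MPTCP subflow context for @sk and attach it to the ULP data;
 * the local address ID starts at -1, i.e. "not set yet".
 */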
static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p\n", ctx);

	ctx->tcp_sock = sk;
	WRITE_ONCE(ctx->local_id, -1);

	return ctx;
}

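/* wake up all processes sleeping on the subflow's wait queue */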
static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

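/* sk_state_change() replacement for subflows: wake any waiters, then let the
 * owning MPTCP socket react to the new subflow state.
 */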
static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	__subflow_state_change(sk);

	msk = mptcp_sk(parent);
	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		pr_fallback(msk);
		subflow->conn_finished = 1;
		mptcp_propagate_state(parent, sk, subflow, NULL);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection,
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(parent);
}

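/* Force-close the MPTCP sockets still attached to unaccepted subflows sitting
 * in the listener accept queue, then hand the queue back to the TCP code for
 * the final cleanup.
 */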
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
	struct request_sock *req, *head, *tail;
	struct mptcp_subflow_context *subflow;
	struct sock *sk, *ssk;

	/* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
	 * Splice the req list, so that accept() cannot reach the pending ssk after
	 * the listener socket is released below.
	 */
	spin_lock_bh(&queue->rskq_lock);
	head = queue->rskq_accept_head;
	tail = queue->rskq_accept_tail;
	queue->rskq_accept_head = NULL;
	queue->rskq_accept_tail = NULL;
	spin_unlock_bh(&queue->rskq_lock);

	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or that will cause an ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (req = head; req; req = req->dl_next) {
		ssk = req->sk;
		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		sk = subflow->conn;
		sock_hold(sk);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context and must be
		 * performed at socket disposal time, in atomic
		 * context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);

	/* restore the listener queue, to let the TCP code clean it up */
	spin_lock_bh(&queue->rskq_lock);
	WARN_ON_ONCE(queue->rskq_accept_head);
	queue->rskq_accept_head = head;
	queue->rskq_accept_tail = tail;
	spin_unlock_bh(&queue->rskq_lock);
}

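/* ULP init hook: attach a subflow context to the TCP socket and divert the
 * socket callbacks to their MPTCP-aware counterparts.
 */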
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

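/* ULP release hook: disconnect the context from the owning MPTCP socket and
 * free it, unless __mptcp_close_ssk() still has to dispose of it.
 */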
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx alive; it will
		 * be freed by __mptcp_close_ssk(), when the subflow is still
		 * unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

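/* ULP clone hook, run when a request socket is promoted to a full socket:
 * create a fresh context for the child and seed it from the request, falling
 * back to plain TCP when MPTCP was not negotiated.
 */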
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, id is always 0 */
		subflow_set_local_id(new_ctx, 0);
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->remote_key_valid = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->request_bkup = subflow_req->request_bkup;
		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req id is valid, fetched via subflow_check_req()
		 * and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}

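/* release_cb override for subflow sockets: flush the actions delegated by
 * the MPTCP layer before running the regular TCP release work.
 */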
static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	long status;

	/* process and clear all the pending actions, but leave the subflow
	 * in the napi queue. To respect locking, only the same CPU that
	 * originated the action can touch the list. mptcp_napi_poll will take
	 * care of it.
	 */
	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
	if (status)
		mptcp_subflow_process_delegated(ssk, status);

	tcp_release_cb(ssk);
}

static int tcp_abort_override(struct sock *ssk, int err)
{
	/* closing a listener subflow requires a great deal of care.
	 * keep it simple and just prevent such operation
	 */
	if (inet_sk_state_load(ssk) == TCP_LISTEN)
		return -EINVAL;

	return tcp_abort(ssk, err);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

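/* create the slab cache backing the MPTCP request socks; SLAB_TYPESAFE_BY_RCU
 * lets RCU lookups race with object reuse, as long as readers re-validate the
 * entry after dereferencing it.
 */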
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}

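/* set up the MPTCP-specific request sock ops and af-specific hooks, then
 * register the "mptcp" ULP; any failure here panics, as MPTCP cannot work
 * without them.
 */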
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;
	tcp_prot_override.diag_destroy = tcp_abort_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. This should not change
	 * in the future, but better to make sure we are warned if that is no
	 * longer the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

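	/* a v4-mapped v6 subflow carries IPv4 traffic on an IPv6 socket, so
	 * reuse the IPv4 transmit helpers for it
	 */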
	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
	tcpv6_prot_override.diag_destroy = tcp_abort_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}
