// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}
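/* Note: per RFC 8684 §3.2, the MP_JOIN handshake HMAC is HMAC-SHA256
 * keyed with the two 64-bit keys concatenated (key1 || key2), computed
 * over the concatenation of the two 32-bit nonces (nonce1 || nonce2).
 * Only truncated forms of the 256-bit digest ever hit the wire: the
 * leftmost 64 bits in the SYN/ACK (see subflow_req_create_thmac()
 * below) and the leftmost 160 bits in the third ACK.
 */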
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       ((mptcp_pm_is_userspace(msk) &&
		 mptcp_userspace_pm_active(msk)) ||
		READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}
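/* The reason stashed in the MPTCP skb extension of the ingress skb is
 * later encoded as an MP_TCPRST option on the outgoing RST, so the peer
 * can tell e.g. a protocol violation from an administrative shutdown
 * (RFC 8684 §3.6).
 */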
/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_request_sock *ireq = inet_rsk(req);

	/* clear tstamp_ok, as needed depending on cookie */
	if (foc && foc->len > -1)
		ireq->tstamp_ok = 0;

	if (synack_type == TCP_SYNACK_FASTOPEN)
		mptcp_fastopen_subflow_synack_set_params(subflow, req);
}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token, thmac, subflow->thmac);

	return thmac == subflow->thmac;
}

void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* mptcp_mp_fail_no_response() can reach here on an already closed
	 * socket
	 */
	if (ssk->sk_state == TCP_CLOSE)
		return;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_set_connected(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_set_connected(sk);
	else
		__set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_set_remote_key(struct mptcp_sock *msk,
				   struct mptcp_subflow_context *subflow,
				   const struct mptcp_options_received *mp_opt)
{
	/* active MPC subflow will reach here multiple times:
	 * at subflow_finish_connect() time and at 4th ack time
	 */
	if (subflow->remote_key_valid)
		return;

	subflow->remote_key_valid = 1;
	subflow->remote_key = mp_opt->sndr_key;
	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
	subflow->iasn++;

	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->ack_seq, subflow->iasn);
	WRITE_ONCE(msk->can_ack, true);
	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}
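/* Note: per RFC 8684 §3.1 the peer's initial data sequence number (IDSN)
 * is derived from its key: mptcp_crypto_key_sha() returns the trailing
 * 64 bits of SHA-256(key), and the first data byte carries IDSN + 1,
 * hence the iasn increment above before seeding msk->ack_seq.
 */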
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	msk = mptcp_sk(parent);
	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(msk);
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(msk->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		subflow_set_remote_key(msk, subflow, &mp_opt);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_set_connected(parent);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		subflow->remote_id = mp_opt.join_id;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(msk, sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(msk, sk);
		mptcp_set_connected(parent);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}
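/* Address IDs are small integers exchanged via ADD_ADDR/MP_JOIN options;
 * ID 0 always refers to the address used by the initial subflow
 * (RFC 8684 §3.4.1), non-zero IDs are assigned by the local path manager.
 */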
static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
	subflow->local_id = local_id;
	subflow->local_id_valid = 1;
}

static int subflow_chk_local_id(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	int err;

	if (likely(subflow->local_id_valid))
		return 0;

	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
	if (err < 0)
		return err;

	subflow_set_local_id(subflow, err);
	return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet6_sk_rebuild_header(sk);
}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{
	if (ops->family == AF_INET)
		ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ops->family == AF_INET6)
		ops = &mptcp_subflow_v6_request_sock_ops;
#endif

	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
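/* The third ACK carries the leftmost 160 bits (MPTCPOPT_HMAC_LEN bytes)
 * of the initiator's HMAC-SHA256, with the key/nonce order swapped WRT
 * the SYN/ACK direction, so each peer proves knowledge of both keys
 * (RFC 8684 §3.2).
 */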
/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	list_del(&mptcp_subflow_ctx(ssk)->node);
	if (inet_csk(ssk)->icsk_ulp_ops) {
		subflow_ulp_fallback(ssk, ctx);
		if (ctx->conn)
			sock_put(ctx->conn);
	}

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     const struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow_set_remote_key(msk, subflow, mp_opt);
	subflow->fully_established = 1;
	WRITE_ONCE(msk->fully_established, true);

	if (subflow->is_mptfo)
		mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}
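/* Passive open: invoked when the 3rd ACK completes the handshake. For an
 * MP_CAPABLE flow a brand new msk is cloned from the listener context;
 * for an MP_JOIN the child is attached to the msk looked up by token at
 * SYN time. On pre-requisite failures the child either falls back to
 * plain TCP (MPC) or is disposed of with a reset (MPJ).
 */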
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct mptcp_sock *owner;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
			fallback = true;

	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fall back on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}
			goto fallback;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
			if (!ctx->conn)
				goto fallback;

			ctx->subflow_id = 1;
			owner = mptcp_sk(ctx->conn);
			mptcp_pm_new_connection(owner, child, 1);

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
				mptcp_subflow_fully_established(ctx, &mp_opt);
				mptcp_pm_fully_established(owner, child);
				ctx->pm_notified = 1;
			}
		} else if (ctx->mp_join) {
			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	mptcp_subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;

fallback:
	mptcp_subflow_drop_ctx(child);
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY,
	MAPPING_BAD_CSUM
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}
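/* A DSS mapping covers the relative subflow sequence range
 * [map_subflow_seq, map_subflow_seq + map_data_len): e.g. with
 * map_subflow_seq=1000 and map_data_len=500, the next byte to read
 * (ssn = copied_seq - ssn_offset) must fall inside [1000, 1500);
 * anything else invalidates the mapping, as checked below.
 */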
static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}
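/* The DSS checksum (RFC 8684 §3.3.1) protects the mapped data plus a
 * pseudo-header made of the 64-bit data sequence number, the subflow
 * sequence number and the data-level length, so a middlebox that
 * coalesces or splits segments across mappings is detected. The helper
 * below accumulates the payload checksum incrementally across skbs and
 * folds the pseudo-header in via __mptcp_make_csum() only once the whole
 * mapping sits in the receive queue.
 */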
static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 offset, seq, delta;
	__sum16 csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	csum = __mptcp_make_csum(subflow->map_seq,
				 subflow->map_subflow_seq,
				 subflow->map_data_len + subflow->map_data_fin,
				 subflow->map_data_csum);
	if (unlikely(csum)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		return MAPPING_BAD_CSUM;
	}

	subflow->valid_csum_seen = 1;
	return MAPPING_OK;
}

static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue, those are the only 0 len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_debug("infinite mapping received");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		subflow->map_data_len = 0;
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated)
					mptcp_schedule_work((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cf. RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}

static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	if (subflow->mp_join)
		return false;
	else if (READ_ONCE(msk->csum_enabled))
		return !subflow->valid_csum_seen;
	else
		return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned long fail_tout;

	/* graceful failure can happen only on the MPC subflow */
	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
		return;

	/* since the close timeout takes precedence over the fail one,
	 * no need to start the latter when the first is already set
	 */
	if (sock_flag((struct sock *)msk, SOCK_DEAD))
		return;

	/* we don't need extreme accuracy here, use a zero fail_tout as special
	 * value meaning no fail timeout at all
	 */
	fail_tout = jiffies + TCP_RTO_MAX;
	if (!fail_tout)
		fail_tout = 1;
	WRITE_ONCE(subflow->fail_tout, fail_tout);
	tcp_send_ack(ssk);

	mptcp_reset_timeout(msk, subflow->fail_tout);
}
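/* RFC 8684 §3.7: a DSS checksum failure on an established subflow is
 * signalled to the peer via MP_FAIL instead of an immediate reset,
 * possibly degrading the connection to an "infinite mapping" fallback
 * when this is the only subflow. Fallback itself is allowed only on the
 * initial subflow (see subflow_can_fallback()); joined subflows are
 * reset instead. The function below implements that decision tree.
 */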
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
			     status == MAPPING_BAD_CSUM))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		if (unlikely(!READ_ONCE(msk->can_ack)))
			goto fallback;

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	if (!__mptcp_check_fallback(msk)) {
		/* RFC 8684 section 3.7. */
		if (status == MAPPING_BAD_CSUM &&
		    (subflow->mp_join || subflow->valid_csum_seen)) {
			subflow->send_mp_fail = 1;

			if (!READ_ONCE(msk->allow_infinite_fallback)) {
				subflow->reset_transient = 0;
				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
				goto reset;
			}
			mptcp_subflow_fail(msk, ssk);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
			return true;
		}

		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
			/* fatal protocol error, close the socket.
			 * subflow_error_report() will introduce the appropriate barriers
			 */
			subflow->reset_transient = 0;
			subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
			WRITE_ONCE(ssk->sk_err, EBADMSG);
			tcp_set_state(ssk, TCP_CLOSE);
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
			tcp_send_active_reset(ssk, GFP_ATOMIC);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
			return false;
		}

		mptcp_do_fallback(ssk);
	}

	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}
/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = tcp_full_space(sk);
}
void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err = sock_error(ssk);
		int ssk_state;

		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		/* We need to propagate only transition to CLOSE state.
		 * Orphaned socket will see such state change via
		 * subflow_sched_work_if_closed() and that path will properly
		 * destroy the msk as needed.
		 */
		ssk_state = inet_sk_state_load(ssk);
		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
			inet_sk_state_store(sk, ssk_state);
		WRITE_ONCE(sk->sk_err, -err);

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	/* bail early if this is a no-op, so that we avoid introducing a
	 * problematic lockdep dependency between TCP accept queue lock
	 * and msk socket spinlock
	 */
	if (!sk->sk_socket)
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		__set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	trace_sk_data_ready(sk);

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before
		 * reaching here, avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}
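/* When an IPv6 listener accepts a v4-mapped connection the child keeps
 * AF_INET6 addressing but must transmit plain IPv4 packets:
 * subflow_v6m_specific, switched in below, keeps the v6 receive side
 * while borrowing the v4 transmit hooks from ipv4_specific, mirroring
 * what plain TCP does for mapped sockets.
 */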
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}
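/* Active subflow creation: build a kernel TCP socket already carrying the
 * MPTCP ULP context, bind it to the path-manager supplied local address,
 * copy the handshake material (keys, token) from the msk and connect with
 * request_join set, so the SYN goes out with an MP_JOIN option.
 */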
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	int err = -ENOTCONN;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int ifindex;
	u8 flags;

	if (!mptcp_is_fully_established(sk))
		goto err_out;

	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
	if (err)
		goto err_out;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (local_id)
		subflow_set_local_id(subflow, local_id);

	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
					     &flags, &ifindex);
	subflow->remote_key_valid = 1;
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	mptcp_sockopt_sync(msk, ssk);

	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	subflow->subflow_id = msk->subflow_id++;
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	sock_hold(ssk);
	list_add_tail(&subflow->node, &msk->conn_list);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	WRITE_ONCE(msk->allow_infinite_fallback, false);
	return 0;

failed_unlink:
	list_del(&subflow->node);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);

err_out:
	/* we account subflows before the creation, and these failures will not
	 * be caught by sk_state_change()
	 */
	mptcp_pm_close_subflow(msk);
	return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}
int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
	if (err)
		return err;

	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);

	err = security_mptcp_add_subflow(sk, sf->sk);
	if (err)
		goto release_ssk;

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 * Update ns_tracker to current stack trace and refcounted tracker.
	 */
	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");

release_ssk:
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	__subflow_state_change(sk);

	msk = mptcp_sk(parent);
	if (subflow_simultaneous_connect(sk)) {
		mptcp_propagate_sndbuf(parent, sk);
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(msk, sk);
		pr_fallback(msk);
		subflow->conn_finished = 1;
		mptcp_set_connected(parent);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(parent);
}
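/* Listener teardown: unaccepted children still hold a reference on their
 * msk, which would otherwise leak if the listening socket goes away
 * first. Walk the accept queue and force-close every unaccepted msk
 * before the listener is released.
 */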
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
	struct mptcp_sock *msk, *next, *head = NULL;
	struct request_sock *req;
	struct sock *sk;

	/* build a list of all unaccepted mptcp sockets */
	spin_lock_bh(&queue->rskq_lock);
	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
		struct mptcp_subflow_context *subflow;
		struct sock *ssk = req->sk;

		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		/* skip if already in list */
		sk = subflow->conn;
		msk = mptcp_sk(sk);
		if (msk->dl_next || msk == head)
			continue;

		sock_hold(sk);
		msk->dl_next = head;
		head = msk;
	}
	spin_unlock_bh(&queue->rskq_lock);
	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or will cause ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (msk = head; msk; msk = next) {
		sk = (struct sock *)msk;

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		next = msk->dl_next;
		msk->dl_next = NULL;

		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context and must be
		 * performed at socket disposal time, in atomic
		 * context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
}
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive, will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}
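/* The .clone hook runs while the 3WHS child is being created: the new
 * socket inherits the listener's context pointer via sk_clone_lock(), so
 * a fresh context is allocated here and the handshake state (keys,
 * token, nonces, sequence offsets) is moved over from the request sock.
 */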

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name = "mptcp",
	.owner = THIS_MODULE,
	.init = subflow_ulp_init,
	.release = subflow_ulp_release,
	.clone = subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}
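
/* The cache created above backs request sock allocation: the core TCP
 * code (see reqsk_alloc() in include/net/request_sock.h) carves objects
 * of ops->obj_size from ops->slab, roughly:
 *
 *	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
 *
 * SLAB_TYPESAFE_BY_RCU means a freed object may be reused immediately,
 * but its memory stays type-stable across an RCU grace period, so
 * lockless lookups holding a stale pointer still dereference valid
 * memory and must re-validate the object after taking a reference.
 */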

void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. It should not change in
	 * the future, but better to make sure we are warned if that is no
	 * longer the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

	/* v4-mapped v6 sockets: transmit via the v4 helpers while keeping
	 * the v6 socket glue
	 */
	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}
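
/* Once the ULP is registered, the MPTCP path is reachable from userspace
 * via the dedicated protocol value (illustrative snippet, not part of
 * this file):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 * Each TCP subflow created on behalf of such a socket gets the "mptcp"
 * ULP attached through subflow_ulp_ops.
 */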