1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2017 - 2019, Intel Corporation. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <crypto/sha2.h> 13 #include <crypto/utils.h> 14 #include <net/sock.h> 15 #include <net/inet_common.h> 16 #include <net/inet_hashtables.h> 17 #include <net/protocol.h> 18 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 19 #include <net/ip6_route.h> 20 #include <net/transp_v6.h> 21 #endif 22 #include <net/mptcp.h> 23 24 #include "protocol.h" 25 #include "mib.h" 26 27 #include <trace/events/mptcp.h> 28 #include <trace/events/sock.h> 29 30 static void mptcp_subflow_ops_undo_override(struct sock *ssk); 31 32 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req, 33 enum linux_mptcp_mib_field field) 34 { 35 MPTCP_INC_STATS(sock_net(req_to_sk(req)), field); 36 } 37 38 static void subflow_req_destructor(struct request_sock *req) 39 { 40 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 41 42 pr_debug("subflow_req=%p\n", subflow_req); 43 44 if (subflow_req->msk) 45 sock_put((struct sock *)subflow_req->msk); 46 47 mptcp_token_destroy_request(req); 48 } 49 50 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, 51 void *hmac) 52 { 53 u8 msg[8]; 54 55 put_unaligned_be32(nonce1, &msg[0]); 56 put_unaligned_be32(nonce2, &msg[4]); 57 58 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); 59 } 60 61 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk) 62 { 63 return mptcp_is_fully_established((void *)msk) && 64 ((mptcp_pm_is_userspace(msk) && 65 mptcp_userspace_pm_active(msk)) || 66 READ_ONCE(msk->pm.accept_subflow)); 67 } 68 69 /* validate received token and create truncated hmac and nonce for SYN-ACK */ 70 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req) 71 { 72 struct mptcp_sock *msk = subflow_req->msk; 73 u8 hmac[SHA256_DIGEST_SIZE]; 74 75 get_random_bytes(&subflow_req->local_nonce, sizeof(u32)); 76 77 subflow_generate_hmac(READ_ONCE(msk->local_key), 78 READ_ONCE(msk->remote_key), 79 subflow_req->local_nonce, 80 subflow_req->remote_nonce, hmac); 81 82 subflow_req->thmac = get_unaligned_be64(hmac); 83 } 84 85 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req) 86 { 87 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 88 struct mptcp_sock *msk; 89 int local_id; 90 91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token); 92 if (!msk) { 93 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN); 94 return NULL; 95 } 96 97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req); 98 if (local_id < 0) { 99 sock_put((struct sock *)msk); 100 return NULL; 101 } 102 subflow_req->local_id = local_id; 103 subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req); 104 105 return msk; 106 } 107 108 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener) 109 { 110 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 111 112 subflow_req->mp_capable = 0; 113 subflow_req->mp_join = 0; 114 subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener)); 115 subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener)); 116 subflow_req->msk = NULL; 117 mptcp_token_init_request(req); 118 } 119 120 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk) 121 { 122 return 
inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport; 123 } 124 125 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason) 126 { 127 struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 128 129 if (mpext) { 130 memset(mpext, 0, sizeof(*mpext)); 131 mpext->reset_reason = reason; 132 } 133 } 134 135 static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb) 136 { 137 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT); 138 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 139 return -EPERM; 140 } 141 142 /* Init mptcp request socket. 143 * 144 * Returns an error code if a JOIN has failed and a TCP reset 145 * should be sent. 146 */ 147 static int subflow_check_req(struct request_sock *req, 148 const struct sock *sk_listener, 149 struct sk_buff *skb) 150 { 151 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); 152 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 153 struct mptcp_options_received mp_opt; 154 bool opt_mp_capable, opt_mp_join; 155 156 pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener); 157 158 #ifdef CONFIG_TCP_MD5SIG 159 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of 160 * TCP option space. 161 */ 162 if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) { 163 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); 164 return -EINVAL; 165 } 166 #endif 167 168 mptcp_get_options(skb, &mp_opt); 169 170 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN); 171 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN); 172 if (opt_mp_capable) { 173 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); 174 175 if (unlikely(listener->pm_listener)) 176 return subflow_reset_req_endp(req, skb); 177 if (opt_mp_join) 178 return 0; 179 } else if (opt_mp_join) { 180 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); 181 182 if (mp_opt.backup) 183 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX); 184 } else if (unlikely(listener->pm_listener)) { 185 return subflow_reset_req_endp(req, skb); 186 } 187 188 if (opt_mp_capable && listener->request_mptcp) { 189 int err, retries = MPTCP_TOKEN_MAX_RETRIES; 190 191 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; 192 again: 193 do { 194 get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key)); 195 } while (subflow_req->local_key == 0); 196 197 if (unlikely(req->syncookie)) { 198 mptcp_crypto_key_sha(subflow_req->local_key, 199 &subflow_req->token, 200 &subflow_req->idsn); 201 if (mptcp_token_exists(subflow_req->token)) { 202 if (retries-- > 0) 203 goto again; 204 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); 205 } else { 206 subflow_req->mp_capable = 1; 207 } 208 return 0; 209 } 210 211 err = mptcp_token_new_request(req); 212 if (err == 0) 213 subflow_req->mp_capable = 1; 214 else if (retries-- > 0) 215 goto again; 216 else 217 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); 218 219 } else if (opt_mp_join && listener->request_mptcp) { 220 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; 221 subflow_req->mp_join = 1; 222 subflow_req->backup = mp_opt.backup; 223 subflow_req->remote_id = mp_opt.join_id; 224 subflow_req->token = mp_opt.token; 225 subflow_req->remote_nonce = mp_opt.nonce; 226 subflow_req->msk = subflow_token_join_request(req); 227 228 /* Can't fall back to TCP in this case. 
*/ 229 if (!subflow_req->msk) { 230 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); 231 return -EPERM; 232 } 233 234 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { 235 pr_debug("syn inet_sport=%d %d\n", 236 ntohs(inet_sk(sk_listener)->inet_sport), 237 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); 238 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { 239 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX); 240 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 241 return -EPERM; 242 } 243 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX); 244 } 245 246 subflow_req_create_thmac(subflow_req); 247 248 if (unlikely(req->syncookie)) { 249 if (!mptcp_can_accept_new_subflow(subflow_req->msk)) { 250 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINREJECTED); 251 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 252 return -EPERM; 253 } 254 255 subflow_init_req_cookie_join_save(subflow_req, skb); 256 } 257 258 pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token, 259 subflow_req->remote_nonce, subflow_req->msk); 260 } 261 262 return 0; 263 } 264 265 int mptcp_subflow_init_cookie_req(struct request_sock *req, 266 const struct sock *sk_listener, 267 struct sk_buff *skb) 268 { 269 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); 270 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 271 struct mptcp_options_received mp_opt; 272 bool opt_mp_capable, opt_mp_join; 273 int err; 274 275 subflow_init_req(req, sk_listener); 276 mptcp_get_options(skb, &mp_opt); 277 278 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK); 279 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK); 280 if (opt_mp_capable && opt_mp_join) 281 return -EINVAL; 282 283 if (opt_mp_capable && listener->request_mptcp) { 284 if (mp_opt.sndr_key == 0) 285 return -EINVAL; 286 287 subflow_req->local_key = mp_opt.rcvr_key; 288 err = mptcp_token_new_request(req); 289 if (err) 290 return err; 291 292 subflow_req->mp_capable = 1; 293 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; 294 } else if (opt_mp_join && listener->request_mptcp) { 295 if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) 296 return -EINVAL; 297 298 subflow_req->mp_join = 1; 299 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; 300 } 301 302 return 0; 303 } 304 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req); 305 306 static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb) 307 { 308 const struct mptcp_ext *mpext = mptcp_get_ext(skb); 309 310 if (!mpext) 311 return SK_RST_REASON_NOT_SPECIFIED; 312 313 return sk_rst_convert_mptcp_reason(mpext->reset_reason); 314 } 315 316 static struct dst_entry *subflow_v4_route_req(const struct sock *sk, 317 struct sk_buff *skb, 318 struct flowi *fl, 319 struct request_sock *req, 320 u32 tw_isn) 321 { 322 struct dst_entry *dst; 323 int err; 324 325 tcp_rsk(req)->is_mptcp = 1; 326 subflow_init_req(req, sk); 327 328 dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn); 329 if (!dst) 330 return NULL; 331 332 err = subflow_check_req(req, sk, skb); 333 if (err == 0) 334 return dst; 335 336 dst_release(dst); 337 if (!req->syncookie) 338 tcp_request_sock_ops.send_reset(sk, skb, 339 mptcp_get_rst_reason(skb)); 340 return NULL; 341 } 342 343 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req, 344 struct tcp_fastopen_cookie *foc, 345 enum tcp_synack_type synack_type) 346 { 347 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 348 struct 
inet_request_sock *ireq = inet_rsk(req); 349 350 /* clear tstamp_ok, as needed depending on cookie */ 351 if (foc && foc->len > -1) 352 ireq->tstamp_ok = 0; 353 354 if (synack_type == TCP_SYNACK_FASTOPEN) 355 mptcp_fastopen_subflow_synack_set_params(subflow, req); 356 } 357 358 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst, 359 struct flowi *fl, 360 struct request_sock *req, 361 struct tcp_fastopen_cookie *foc, 362 enum tcp_synack_type synack_type, 363 struct sk_buff *syn_skb) 364 { 365 subflow_prep_synack(sk, req, foc, synack_type); 366 367 return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc, 368 synack_type, syn_skb); 369 } 370 371 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 372 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst, 373 struct flowi *fl, 374 struct request_sock *req, 375 struct tcp_fastopen_cookie *foc, 376 enum tcp_synack_type synack_type, 377 struct sk_buff *syn_skb) 378 { 379 subflow_prep_synack(sk, req, foc, synack_type); 380 381 return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc, 382 synack_type, syn_skb); 383 } 384 385 static struct dst_entry *subflow_v6_route_req(const struct sock *sk, 386 struct sk_buff *skb, 387 struct flowi *fl, 388 struct request_sock *req, 389 u32 tw_isn) 390 { 391 struct dst_entry *dst; 392 int err; 393 394 tcp_rsk(req)->is_mptcp = 1; 395 subflow_init_req(req, sk); 396 397 dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn); 398 if (!dst) 399 return NULL; 400 401 err = subflow_check_req(req, sk, skb); 402 if (err == 0) 403 return dst; 404 405 dst_release(dst); 406 if (!req->syncookie) 407 tcp6_request_sock_ops.send_reset(sk, skb, 408 mptcp_get_rst_reason(skb)); 409 return NULL; 410 } 411 #endif 412 413 /* validate received truncated hmac and create hmac for third ACK */ 414 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) 415 { 416 u8 hmac[SHA256_DIGEST_SIZE]; 417 u64 thmac; 418 419 subflow_generate_hmac(subflow->remote_key, subflow->local_key, 420 subflow->remote_nonce, subflow->local_nonce, 421 hmac); 422 423 thmac = get_unaligned_be64(hmac); 424 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n", 425 subflow, subflow->token, thmac, subflow->thmac); 426 427 return thmac == subflow->thmac; 428 } 429 430 void mptcp_subflow_reset(struct sock *ssk) 431 { 432 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 433 struct sock *sk = subflow->conn; 434 435 /* mptcp_mp_fail_no_response() can reach here on an already closed 436 * socket 437 */ 438 if (ssk->sk_state == TCP_CLOSE) 439 return; 440 441 /* must hold: tcp_done() could drop last reference on parent */ 442 sock_hold(sk); 443 444 mptcp_send_active_reset_reason(ssk); 445 tcp_done(ssk); 446 if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags)) 447 mptcp_schedule_work(sk); 448 449 sock_put(sk); 450 } 451 452 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk) 453 { 454 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport; 455 } 456 457 void __mptcp_sync_state(struct sock *sk, int state) 458 { 459 struct mptcp_subflow_context *subflow; 460 struct mptcp_sock *msk = mptcp_sk(sk); 461 struct sock *ssk = msk->first; 462 463 subflow = mptcp_subflow_ctx(ssk); 464 __mptcp_propagate_sndbuf(sk, ssk); 465 if (!msk->rcvspace_init) 466 mptcp_rcv_space_init(msk, ssk); 467 468 if (sk->sk_state == TCP_SYN_SENT) { 469 /* subflow->idsn is always available in TCP_SYN_SENT state, 470 * even for the 
FASTOPEN scenarios 471 */ 472 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); 473 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 474 mptcp_set_state(sk, state); 475 sk->sk_state_change(sk); 476 } 477 } 478 479 static void subflow_set_remote_key(struct mptcp_sock *msk, 480 struct mptcp_subflow_context *subflow, 481 const struct mptcp_options_received *mp_opt) 482 { 483 /* active MPC subflow will reach here multiple times: 484 * at subflow_finish_connect() time and at 4th ack time 485 */ 486 if (subflow->remote_key_valid) 487 return; 488 489 subflow->remote_key_valid = 1; 490 subflow->remote_key = mp_opt->sndr_key; 491 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn); 492 subflow->iasn++; 493 494 WRITE_ONCE(msk->remote_key, subflow->remote_key); 495 WRITE_ONCE(msk->ack_seq, subflow->iasn); 496 WRITE_ONCE(msk->can_ack, true); 497 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); 498 } 499 500 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk, 501 struct mptcp_subflow_context *subflow, 502 const struct mptcp_options_received *mp_opt) 503 { 504 struct mptcp_sock *msk = mptcp_sk(sk); 505 506 mptcp_data_lock(sk); 507 if (mp_opt) { 508 /* Options are available only in the non fallback cases 509 * avoid updating rx path fields otherwise 510 */ 511 WRITE_ONCE(msk->snd_una, subflow->idsn + 1); 512 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); 513 subflow_set_remote_key(msk, subflow, mp_opt); 514 } 515 516 if (!sock_owned_by_user(sk)) { 517 __mptcp_sync_state(sk, ssk->sk_state); 518 } else { 519 msk->pending_state = ssk->sk_state; 520 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags); 521 } 522 mptcp_data_unlock(sk); 523 } 524 525 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) 526 { 527 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 528 struct mptcp_options_received mp_opt; 529 struct sock *parent = subflow->conn; 530 struct mptcp_sock *msk; 531 532 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); 533 534 /* be sure no special action on any packet other than syn-ack */ 535 if (subflow->conn_finished) 536 return; 537 538 msk = mptcp_sk(parent); 539 subflow->rel_write_seq = 1; 540 subflow->conn_finished = 1; 541 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; 542 pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); 543 544 mptcp_get_options(skb, &mp_opt); 545 if (subflow->request_mptcp) { 546 if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) { 547 if (!mptcp_try_fallback(sk, 548 MPTCP_MIB_MPCAPABLEACTIVEFALLBACK)) { 549 MPTCP_INC_STATS(sock_net(sk), 550 MPTCP_MIB_FALLBACKFAILED); 551 goto do_reset; 552 } 553 554 goto fallback; 555 } 556 557 if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD) 558 WRITE_ONCE(msk->csum_enabled, true); 559 if (mp_opt.deny_join_id0) 560 WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 561 subflow->mp_capable = 1; 562 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK); 563 mptcp_finish_connect(sk); 564 mptcp_active_enable(parent); 565 mptcp_propagate_state(parent, sk, subflow, &mp_opt); 566 } else if (subflow->request_join) { 567 u8 hmac[SHA256_DIGEST_SIZE]; 568 569 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) { 570 subflow->reset_reason = MPTCP_RST_EMPTCP; 571 goto do_reset; 572 } 573 574 subflow->backup = mp_opt.backup; 575 subflow->thmac = mp_opt.thmac; 576 subflow->remote_nonce = mp_opt.nonce; 577 WRITE_ONCE(subflow->remote_id, mp_opt.join_id); 578 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", 579 subflow, subflow->thmac, subflow->remote_nonce, 
580 subflow->backup); 581 582 if (!subflow_thmac_valid(subflow)) { 583 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC); 584 subflow->reset_reason = MPTCP_RST_EMPTCP; 585 goto do_reset; 586 } 587 588 if (!mptcp_finish_join(sk)) 589 goto do_reset; 590 591 subflow_generate_hmac(subflow->local_key, subflow->remote_key, 592 subflow->local_nonce, 593 subflow->remote_nonce, 594 hmac); 595 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); 596 597 subflow->mp_join = 1; 598 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); 599 600 if (subflow->backup) 601 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); 602 603 if (subflow_use_different_dport(msk, sk)) { 604 pr_debug("synack inet_dport=%d %d\n", 605 ntohs(inet_sk(sk)->inet_dport), 606 ntohs(inet_sk(parent)->inet_dport)); 607 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX); 608 } 609 } else if (mptcp_check_fallback(sk)) { 610 /* It looks like MPTCP is blocked, while TCP is not */ 611 if (subflow->mpc_drop) 612 mptcp_active_disable(parent); 613 fallback: 614 mptcp_propagate_state(parent, sk, subflow, NULL); 615 } 616 return; 617 618 do_reset: 619 subflow->reset_transient = 0; 620 mptcp_subflow_reset(sk); 621 } 622 623 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id) 624 { 625 WARN_ON_ONCE(local_id < 0 || local_id > 255); 626 WRITE_ONCE(subflow->local_id, local_id); 627 } 628 629 static int subflow_chk_local_id(struct sock *sk) 630 { 631 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 632 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 633 int err; 634 635 if (likely(subflow->local_id >= 0)) 636 return 0; 637 638 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk); 639 if (err < 0) 640 return err; 641 642 subflow_set_local_id(subflow, err); 643 subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); 644 645 return 0; 646 } 647 648 static int subflow_rebuild_header(struct sock *sk) 649 { 650 int err = subflow_chk_local_id(sk); 651 652 if (unlikely(err < 0)) 653 return err; 654 655 return inet_sk_rebuild_header(sk); 656 } 657 658 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 659 static int subflow_v6_rebuild_header(struct sock *sk) 660 { 661 int err = subflow_chk_local_id(sk); 662 663 if (unlikely(err < 0)) 664 return err; 665 666 return inet6_sk_rebuild_header(sk); 667 } 668 #endif 669 670 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init; 671 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init; 672 673 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) 674 { 675 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 676 677 pr_debug("subflow=%p\n", subflow); 678 679 /* Never answer to SYNs sent to broadcast or multicast */ 680 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 681 goto drop; 682 683 return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops, 684 &subflow_request_sock_ipv4_ops, 685 sk, skb); 686 drop: 687 tcp_listendrop(sk); 688 return 0; 689 } 690 691 static void subflow_v4_req_destructor(struct request_sock *req) 692 { 693 subflow_req_destructor(req); 694 tcp_request_sock_ops.destructor(req); 695 } 696 697 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 698 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init; 699 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; 700 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; 701 static struct inet_connection_sock_af_ops 
subflow_v6m_specific __ro_after_init; 702 static struct proto tcpv6_prot_override __ro_after_init; 703 704 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) 705 { 706 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 707 708 pr_debug("subflow=%p\n", subflow); 709 710 if (skb->protocol == htons(ETH_P_IP)) 711 return subflow_v4_conn_request(sk, skb); 712 713 if (!ipv6_unicast_destination(skb)) 714 goto drop; 715 716 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { 717 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); 718 return 0; 719 } 720 721 return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops, 722 &subflow_request_sock_ipv6_ops, sk, skb); 723 724 drop: 725 tcp_listendrop(sk); 726 return 0; /* don't send reset */ 727 } 728 729 static void subflow_v6_req_destructor(struct request_sock *req) 730 { 731 subflow_req_destructor(req); 732 tcp6_request_sock_ops.destructor(req); 733 } 734 #endif 735 736 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, 737 struct sock *sk_listener, 738 bool attach_listener) 739 { 740 if (ops->family == AF_INET) 741 ops = &mptcp_subflow_v4_request_sock_ops; 742 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 743 else if (ops->family == AF_INET6) 744 ops = &mptcp_subflow_v6_request_sock_ops; 745 #endif 746 747 return inet_reqsk_alloc(ops, sk_listener, attach_listener); 748 } 749 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc); 750 751 /* validate hmac received in third ACK */ 752 static bool subflow_hmac_valid(const struct mptcp_subflow_request_sock *subflow_req, 753 const struct mptcp_options_received *mp_opt) 754 { 755 struct mptcp_sock *msk = subflow_req->msk; 756 u8 hmac[SHA256_DIGEST_SIZE]; 757 758 subflow_generate_hmac(READ_ONCE(msk->remote_key), 759 READ_ONCE(msk->local_key), 760 subflow_req->remote_nonce, 761 subflow_req->local_nonce, hmac); 762 763 return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN); 764 } 765 766 static void subflow_ulp_fallback(struct sock *sk, 767 struct mptcp_subflow_context *old_ctx) 768 { 769 struct inet_connection_sock *icsk = inet_csk(sk); 770 771 mptcp_subflow_tcp_fallback(sk, old_ctx); 772 icsk->icsk_ulp_ops = NULL; 773 rcu_assign_pointer(icsk->icsk_ulp_data, NULL); 774 tcp_sk(sk)->is_mptcp = 0; 775 776 mptcp_subflow_ops_undo_override(sk); 777 } 778 779 void mptcp_subflow_drop_ctx(struct sock *ssk) 780 { 781 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); 782 783 if (!ctx) 784 return; 785 786 list_del(&mptcp_subflow_ctx(ssk)->node); 787 if (inet_csk(ssk)->icsk_ulp_ops) { 788 subflow_ulp_fallback(ssk, ctx); 789 if (ctx->conn) 790 sock_put(ctx->conn); 791 } 792 793 kfree_rcu(ctx, rcu); 794 } 795 796 void __mptcp_subflow_fully_established(struct mptcp_sock *msk, 797 struct mptcp_subflow_context *subflow, 798 const struct mptcp_options_received *mp_opt) 799 { 800 subflow_set_remote_key(msk, subflow, mp_opt); 801 WRITE_ONCE(subflow->fully_established, true); 802 WRITE_ONCE(msk->fully_established, true); 803 } 804 805 static struct sock *subflow_syn_recv_sock(const struct sock *sk, 806 struct sk_buff *skb, 807 struct request_sock *req, 808 struct dst_entry *dst, 809 struct request_sock *req_unhash, 810 bool *own_req) 811 { 812 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk); 813 struct mptcp_subflow_request_sock *subflow_req; 814 struct mptcp_options_received mp_opt; 815 bool fallback, fallback_is_fatal; 816 enum sk_rst_reason reason; 817 struct mptcp_sock *owner; 818 struct sock *child; 819 820 pr_debug("listener=%p, req=%p, 
conn=%p\n", listener, req, listener->conn); 821 822 /* After child creation we must look for MPC even when options 823 * are not parsed 824 */ 825 mp_opt.suboptions = 0; 826 827 /* hopefully temporary handling for MP_JOIN+syncookie */ 828 subflow_req = mptcp_subflow_rsk(req); 829 fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join; 830 fallback = !tcp_rsk(req)->is_mptcp; 831 if (fallback) 832 goto create_child; 833 834 /* if the sk is MP_CAPABLE, we try to fetch the client key */ 835 if (subflow_req->mp_capable) { 836 /* we can receive and accept an in-window, out-of-order pkt, 837 * which may not carry the MP_CAPABLE opt even on mptcp enabled 838 * paths: always try to extract the peer key, and fallback 839 * for packets missing it. 840 * Even OoO DSS packets coming legitly after dropped or 841 * reordered MPC will cause fallback, but we don't have other 842 * options. 843 */ 844 mptcp_get_options(skb, &mp_opt); 845 if (!(mp_opt.suboptions & 846 (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK))) 847 fallback = true; 848 849 } else if (subflow_req->mp_join) { 850 mptcp_get_options(skb, &mp_opt); 851 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK)) 852 fallback = true; 853 } 854 855 create_child: 856 child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, 857 req_unhash, own_req); 858 859 if (child && *own_req) { 860 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child); 861 862 tcp_rsk(req)->drop_req = false; 863 864 /* we need to fallback on ctx allocation failure and on pre-reqs 865 * checking above. In the latter scenario we additionally need 866 * to reset the context to non MPTCP status. 867 */ 868 if (!ctx || fallback) { 869 if (fallback_is_fatal) { 870 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); 871 goto dispose_child; 872 } 873 goto fallback; 874 } 875 876 /* ssk inherits options of listener sk */ 877 ctx->setsockopt_seq = listener->setsockopt_seq; 878 879 if (ctx->mp_capable) { 880 ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req); 881 if (!ctx->conn) 882 goto fallback; 883 884 ctx->subflow_id = 1; 885 owner = mptcp_sk(ctx->conn); 886 887 if (mp_opt.deny_join_id0) 888 WRITE_ONCE(owner->pm.remote_deny_join_id0, true); 889 890 mptcp_pm_new_connection(owner, child, 1); 891 892 /* with OoO packets we can reach here without ingress 893 * mpc option 894 */ 895 if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) { 896 mptcp_pm_fully_established(owner, child); 897 ctx->pm_notified = 1; 898 } 899 } else if (ctx->mp_join) { 900 owner = subflow_req->msk; 901 if (!owner) { 902 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 903 goto dispose_child; 904 } 905 906 if (!subflow_hmac_valid(subflow_req, &mp_opt)) { 907 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); 908 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 909 goto dispose_child; 910 } 911 912 if (!mptcp_can_accept_new_subflow(owner)) { 913 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINREJECTED); 914 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 915 goto dispose_child; 916 } 917 918 /* move the msk reference ownership to the subflow */ 919 subflow_req->msk = NULL; 920 ctx->conn = (struct sock *)owner; 921 922 if (subflow_use_different_sport(owner, sk)) { 923 pr_debug("ack inet_sport=%d %d\n", 924 ntohs(inet_sk(sk)->inet_sport), 925 ntohs(inet_sk((struct sock *)owner)->inet_sport)); 926 if (!mptcp_pm_sport_in_anno_list(owner, sk)) { 927 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX); 928 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 929 goto dispose_child; 930 } 931 
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX); 932 } 933 934 if (!mptcp_finish_join(child)) { 935 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child); 936 937 subflow_add_reset_reason(skb, subflow->reset_reason); 938 goto dispose_child; 939 } 940 941 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX); 942 tcp_rsk(req)->drop_req = true; 943 } 944 } 945 946 /* check for expected invariant - should never trigger, just help 947 * catching earlier subtle bugs 948 */ 949 WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp && 950 (!mptcp_subflow_ctx(child) || 951 !mptcp_subflow_ctx(child)->conn)); 952 return child; 953 954 dispose_child: 955 mptcp_subflow_drop_ctx(child); 956 tcp_rsk(req)->drop_req = true; 957 inet_csk_prepare_for_destroy_sock(child); 958 tcp_done(child); 959 reason = mptcp_get_rst_reason(skb); 960 req->rsk_ops->send_reset(sk, skb, reason); 961 962 /* The last child reference will be released by the caller */ 963 return child; 964 965 fallback: 966 if (fallback) 967 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); 968 mptcp_subflow_drop_ctx(child); 969 return child; 970 } 971 972 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; 973 static struct proto tcp_prot_override __ro_after_init; 974 975 enum mapping_status { 976 MAPPING_OK, 977 MAPPING_INVALID, 978 MAPPING_EMPTY, 979 MAPPING_DATA_FIN, 980 MAPPING_DUMMY, 981 MAPPING_BAD_CSUM, 982 MAPPING_NODSS 983 }; 984 985 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) 986 { 987 pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n", 988 ssn, subflow->map_subflow_seq, subflow->map_data_len); 989 } 990 991 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb) 992 { 993 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 994 unsigned int skb_consumed; 995 996 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq; 997 if (unlikely(skb_consumed >= skb->len)) { 998 DEBUG_NET_WARN_ON_ONCE(1); 999 return true; 1000 } 1001 1002 return skb->len - skb_consumed <= subflow->map_data_len - 1003 mptcp_subflow_get_map_offset(subflow); 1004 } 1005 1006 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb) 1007 { 1008 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1009 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; 1010 1011 if (unlikely(before(ssn, subflow->map_subflow_seq))) { 1012 /* Mapping covers data later in the subflow stream, 1013 * currently unsupported. 1014 */ 1015 dbg_bad_map(subflow, ssn); 1016 return false; 1017 } 1018 if (unlikely(!before(ssn, subflow->map_subflow_seq + 1019 subflow->map_data_len))) { 1020 /* Mapping covers only past subflow data, invalid */ 1021 dbg_bad_map(subflow, ssn); 1022 return false; 1023 } 1024 return true; 1025 } 1026 1027 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb, 1028 bool csum_reqd) 1029 { 1030 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1031 u32 offset, seq, delta; 1032 __sum16 csum; 1033 int len; 1034 1035 if (!csum_reqd) 1036 return MAPPING_OK; 1037 1038 /* mapping already validated on previous traversal */ 1039 if (subflow->map_csum_len == subflow->map_data_len) 1040 return MAPPING_OK; 1041 1042 /* traverse the receive queue, ensuring it contains a full 1043 * DSS mapping and accumulating the related csum. 
1044 * Preserve the accumulated csum across multiple calls, to compute 1045 * the csum only once 1046 */ 1047 delta = subflow->map_data_len - subflow->map_csum_len; 1048 for (;;) { 1049 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; 1050 offset = seq - TCP_SKB_CB(skb)->seq; 1051 1052 /* if the current skb has not been accounted yet, csum its contents 1053 * up to the amount covered by the current DSS 1054 */ 1055 if (offset < skb->len) { 1056 __wsum csum; 1057 1058 len = min(skb->len - offset, delta); 1059 csum = skb_checksum(skb, offset, len, 0); 1060 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum, 1061 subflow->map_csum_len); 1062 1063 delta -= len; 1064 subflow->map_csum_len += len; 1065 } 1066 if (delta == 0) 1067 break; 1068 1069 if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) { 1070 /* if this subflow is closed, the partial mapping 1071 * will never be completed; flush the pending skbs, so 1072 * that subflow_sched_work_if_closed() can kick in 1073 */ 1074 if (unlikely(ssk->sk_state == TCP_CLOSE)) 1075 while ((skb = skb_peek(&ssk->sk_receive_queue))) 1076 sk_eat_skb(ssk, skb); 1077 1078 /* not enough data to validate the csum */ 1079 return MAPPING_EMPTY; 1080 } 1081 1082 /* the DSS mapping for next skbs will be validated later, 1083 * when a get_mapping_status call will process such skb 1084 */ 1085 skb = skb->next; 1086 } 1087 1088 /* note that 'map_data_len' accounts only for the carried data, does 1089 * not include the eventual seq increment due to the data fin, 1090 * while the pseudo header requires the original DSS data len, 1091 * including that 1092 */ 1093 csum = __mptcp_make_csum(subflow->map_seq, 1094 subflow->map_subflow_seq, 1095 subflow->map_data_len + subflow->map_data_fin, 1096 subflow->map_data_csum); 1097 if (unlikely(csum)) { 1098 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR); 1099 return MAPPING_BAD_CSUM; 1100 } 1101 1102 subflow->valid_csum_seen = 1; 1103 return MAPPING_OK; 1104 } 1105 1106 static enum mapping_status get_mapping_status(struct sock *ssk, 1107 struct mptcp_sock *msk) 1108 { 1109 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1110 bool csum_reqd = READ_ONCE(msk->csum_enabled); 1111 struct mptcp_ext *mpext; 1112 struct sk_buff *skb; 1113 u16 data_len; 1114 u64 map_seq; 1115 1116 skb = skb_peek(&ssk->sk_receive_queue); 1117 if (!skb) 1118 return MAPPING_EMPTY; 1119 1120 if (mptcp_check_fallback(ssk)) 1121 return MAPPING_DUMMY; 1122 1123 mpext = mptcp_get_ext(skb); 1124 if (!mpext || !mpext->use_map) { 1125 if (!subflow->map_valid && !skb->len) { 1126 /* the TCP stack delivers 0 len FIN pkts to the receive 1127 * queue, those are the only 0len pkts ever expected here, 1128 * and we can admit no mapping only for 0 len pkts 1129 */ 1130 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1131 WARN_ONCE(1, "0len seq %d:%d flags %x", 1132 TCP_SKB_CB(skb)->seq, 1133 TCP_SKB_CB(skb)->end_seq, 1134 TCP_SKB_CB(skb)->tcp_flags); 1135 sk_eat_skb(ssk, skb); 1136 return MAPPING_EMPTY; 1137 } 1138 1139 /* If the required DSS has likely been dropped by a middlebox */ 1140 if (!subflow->map_valid) 1141 return MAPPING_NODSS; 1142 1143 goto validate_seq; 1144 } 1145 1146 trace_get_mapping_status(mpext); 1147 1148 data_len = mpext->data_len; 1149 if (data_len == 0) { 1150 pr_debug("infinite mapping received\n"); 1151 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); 1152 return MAPPING_INVALID; 1153 } 1154 1155 if (mpext->data_fin == 1) { 1156 u64 data_fin_seq; 1157 1158 if (data_len == 1) { 1159 bool updated = 
mptcp_update_rcv_data_fin(msk, mpext->data_seq, 1160 mpext->dsn64); 1161 pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq); 1162 if (subflow->map_valid) { 1163 /* A DATA_FIN might arrive in a DSS 1164 * option before the previous mapping 1165 * has been fully consumed. Continue 1166 * handling the existing mapping. 1167 */ 1168 skb_ext_del(skb, SKB_EXT_MPTCP); 1169 return MAPPING_OK; 1170 } 1171 1172 if (updated) 1173 mptcp_schedule_work((struct sock *)msk); 1174 1175 return MAPPING_DATA_FIN; 1176 } 1177 1178 data_fin_seq = mpext->data_seq + data_len - 1; 1179 1180 /* If mpext->data_seq is a 32-bit value, data_fin_seq must also 1181 * be limited to 32 bits. 1182 */ 1183 if (!mpext->dsn64) 1184 data_fin_seq &= GENMASK_ULL(31, 0); 1185 1186 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64); 1187 pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n", 1188 data_fin_seq, mpext->dsn64); 1189 1190 /* Adjust for DATA_FIN using 1 byte of sequence space */ 1191 data_len--; 1192 } 1193 1194 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64); 1195 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); 1196 1197 if (subflow->map_valid) { 1198 /* Allow replacing only with an identical map */ 1199 if (subflow->map_seq == map_seq && 1200 subflow->map_subflow_seq == mpext->subflow_seq && 1201 subflow->map_data_len == data_len && 1202 subflow->map_csum_reqd == mpext->csum_reqd) { 1203 skb_ext_del(skb, SKB_EXT_MPTCP); 1204 goto validate_csum; 1205 } 1206 1207 /* If this skb data are fully covered by the current mapping, 1208 * the new map would need caching, which is not supported 1209 */ 1210 if (skb_is_fully_mapped(ssk, skb)) { 1211 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH); 1212 return MAPPING_INVALID; 1213 } 1214 1215 /* will validate the next map after consuming the current one */ 1216 goto validate_csum; 1217 } 1218 1219 subflow->map_seq = map_seq; 1220 subflow->map_subflow_seq = mpext->subflow_seq; 1221 subflow->map_data_len = data_len; 1222 subflow->map_valid = 1; 1223 subflow->map_data_fin = mpext->data_fin; 1224 subflow->mpc_map = mpext->mpc_map; 1225 subflow->map_csum_reqd = mpext->csum_reqd; 1226 subflow->map_csum_len = 0; 1227 subflow->map_data_csum = csum_unfold(mpext->csum); 1228 1229 /* Cfr RFC 8684 Section 3.3.0 */ 1230 if (unlikely(subflow->map_csum_reqd != csum_reqd)) 1231 return MAPPING_INVALID; 1232 1233 pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", 1234 subflow->map_seq, subflow->map_subflow_seq, 1235 subflow->map_data_len, subflow->map_csum_reqd, 1236 subflow->map_data_csum); 1237 1238 validate_seq: 1239 /* we revalidate valid mapping on new skb, because we must ensure 1240 * the current skb is completely covered by the available mapping 1241 */ 1242 if (!validate_mapping(ssk, skb)) { 1243 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH); 1244 return MAPPING_INVALID; 1245 } 1246 1247 skb_ext_del(skb, SKB_EXT_MPTCP); 1248 1249 validate_csum: 1250 return validate_data_csum(ssk, skb, csum_reqd); 1251 } 1252 1253 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, 1254 u64 limit) 1255 { 1256 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1257 bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 1258 struct tcp_sock *tp = tcp_sk(ssk); 1259 u32 offset, incr, avail_len; 1260 1261 offset = tp->copied_seq - TCP_SKB_CB(skb)->seq; 1262 if (WARN_ON_ONCE(offset > skb->len)) 1263 goto out; 1264 1265 avail_len = skb->len - offset; 1266 incr = limit >= 
avail_len ? avail_len + fin : limit; 1267 1268 pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len, 1269 offset, subflow->map_subflow_seq); 1270 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); 1271 tcp_sk(ssk)->copied_seq += incr; 1272 1273 out: 1274 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) 1275 sk_eat_skb(ssk, skb); 1276 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) 1277 subflow->map_valid = 0; 1278 } 1279 1280 static bool subflow_is_done(const struct sock *sk) 1281 { 1282 return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE; 1283 } 1284 1285 /* sched mptcp worker for subflow cleanup if no more data is pending */ 1286 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) 1287 { 1288 struct sock *sk = (struct sock *)msk; 1289 1290 if (likely(ssk->sk_state != TCP_CLOSE && 1291 (ssk->sk_state != TCP_CLOSE_WAIT || 1292 inet_sk_state_load(sk) != TCP_ESTABLISHED))) 1293 return; 1294 1295 if (!skb_queue_empty(&ssk->sk_receive_queue)) 1296 return; 1297 1298 if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) 1299 mptcp_schedule_work(sk); 1300 1301 /* when the fallback subflow closes the rx side, trigger a 'dummy' 1302 * ingress data fin, so that the msk state will follow along 1303 */ 1304 if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) && 1305 msk->first == ssk && 1306 mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true)) 1307 mptcp_schedule_work(sk); 1308 } 1309 1310 static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) 1311 { 1312 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1313 unsigned long fail_tout; 1314 1315 /* we are really failing, prevent any later subflow join */ 1316 spin_lock_bh(&msk->fallback_lock); 1317 if (!msk->allow_infinite_fallback) { 1318 spin_unlock_bh(&msk->fallback_lock); 1319 return false; 1320 } 1321 msk->allow_subflows = false; 1322 spin_unlock_bh(&msk->fallback_lock); 1323 1324 /* graceful failure can happen only on the MPC subflow */ 1325 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first))) 1326 return false; 1327 1328 /* since the close timeout take precedence on the fail one, 1329 * no need to start the latter when the first is already set 1330 */ 1331 if (sock_flag((struct sock *)msk, SOCK_DEAD)) 1332 return true; 1333 1334 /* we don't need extreme accuracy here, use a zero fail_tout as special 1335 * value meaning no fail timeout at all; 1336 */ 1337 fail_tout = jiffies + TCP_RTO_MAX; 1338 if (!fail_tout) 1339 fail_tout = 1; 1340 WRITE_ONCE(subflow->fail_tout, fail_tout); 1341 tcp_send_ack(ssk); 1342 1343 mptcp_reset_tout_timer(msk, subflow->fail_tout); 1344 return true; 1345 } 1346 1347 static bool subflow_check_data_avail(struct sock *ssk) 1348 { 1349 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1350 enum mapping_status status; 1351 struct mptcp_sock *msk; 1352 struct sk_buff *skb; 1353 1354 if (!skb_peek(&ssk->sk_receive_queue)) 1355 WRITE_ONCE(subflow->data_avail, false); 1356 if (subflow->data_avail) 1357 return true; 1358 1359 msk = mptcp_sk(subflow->conn); 1360 for (;;) { 1361 u64 ack_seq; 1362 u64 old_ack; 1363 1364 status = get_mapping_status(ssk, msk); 1365 trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue)); 1366 if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY || 1367 status == MAPPING_BAD_CSUM || status == MAPPING_NODSS)) 1368 goto fallback; 1369 1370 if (status != MAPPING_OK) 1371 goto no_data; 1372 1373 skb = 
skb_peek(&ssk->sk_receive_queue); 1374 if (WARN_ON_ONCE(!skb)) 1375 goto no_data; 1376 1377 if (unlikely(!READ_ONCE(msk->can_ack))) 1378 goto fallback; 1379 1380 old_ack = READ_ONCE(msk->ack_seq); 1381 ack_seq = mptcp_subflow_get_mapped_dsn(subflow); 1382 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, 1383 ack_seq); 1384 if (unlikely(before64(ack_seq, old_ack))) { 1385 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); 1386 continue; 1387 } 1388 1389 WRITE_ONCE(subflow->data_avail, true); 1390 break; 1391 } 1392 return true; 1393 1394 no_data: 1395 subflow_sched_work_if_closed(msk, ssk); 1396 return false; 1397 1398 fallback: 1399 if (!__mptcp_check_fallback(msk)) { 1400 /* RFC 8684 section 3.7. */ 1401 if (status == MAPPING_BAD_CSUM && 1402 (subflow->mp_join || subflow->valid_csum_seen)) { 1403 subflow->send_mp_fail = 1; 1404 1405 if (!mptcp_subflow_fail(msk, ssk)) { 1406 subflow->reset_transient = 0; 1407 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX; 1408 goto reset; 1409 } 1410 WRITE_ONCE(subflow->data_avail, true); 1411 return true; 1412 } 1413 1414 if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSFALLBACK)) { 1415 /* fatal protocol error, close the socket. 1416 * subflow_error_report() will introduce the appropriate barriers 1417 */ 1418 subflow->reset_transient = 0; 1419 subflow->reset_reason = status == MAPPING_NODSS ? 1420 MPTCP_RST_EMIDDLEBOX : 1421 MPTCP_RST_EMPTCP; 1422 1423 reset: 1424 WRITE_ONCE(ssk->sk_err, EBADMSG); 1425 tcp_set_state(ssk, TCP_CLOSE); 1426 while ((skb = skb_peek(&ssk->sk_receive_queue))) 1427 sk_eat_skb(ssk, skb); 1428 mptcp_send_active_reset_reason(ssk); 1429 WRITE_ONCE(subflow->data_avail, false); 1430 return false; 1431 } 1432 } 1433 1434 skb = skb_peek(&ssk->sk_receive_queue); 1435 subflow->map_valid = 1; 1436 subflow->map_seq = READ_ONCE(msk->ack_seq); 1437 subflow->map_data_len = skb->len; 1438 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; 1439 WRITE_ONCE(subflow->data_avail, true); 1440 return true; 1441 } 1442 1443 bool mptcp_subflow_data_available(struct sock *sk) 1444 { 1445 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1446 1447 /* check if current mapping is still valid */ 1448 if (subflow->map_valid && 1449 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { 1450 subflow->map_valid = 0; 1451 WRITE_ONCE(subflow->data_avail, false); 1452 1453 pr_debug("Done with mapping: seq=%u data_len=%u\n", 1454 subflow->map_subflow_seq, 1455 subflow->map_data_len); 1456 } 1457 1458 return subflow_check_data_avail(sk); 1459 } 1460 1461 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy, 1462 * not the ssk one. 1463 * 1464 * In mptcp, rwin is about the mptcp-level connection data. 1465 * 1466 * Data that is still on the ssk rx queue can thus be ignored, 1467 * as far as mptcp peer is concerned that data is still inflight. 1468 * DSS ACK is updated when skb is moved to the mptcp rx queue. 
1469 */ 1470 void mptcp_space(const struct sock *ssk, int *space, int *full_space) 1471 { 1472 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1473 const struct sock *sk = subflow->conn; 1474 1475 *space = __mptcp_space(sk); 1476 *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); 1477 } 1478 1479 static void subflow_error_report(struct sock *ssk) 1480 { 1481 struct sock *sk = mptcp_subflow_ctx(ssk)->conn; 1482 1483 /* bail early if this is a no-op, so that we avoid introducing a 1484 * problematic lockdep dependency between TCP accept queue lock 1485 * and msk socket spinlock 1486 */ 1487 if (!sk->sk_socket) 1488 return; 1489 1490 mptcp_data_lock(sk); 1491 if (!sock_owned_by_user(sk)) 1492 __mptcp_error_report(sk); 1493 else 1494 __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags); 1495 mptcp_data_unlock(sk); 1496 } 1497 1498 static void subflow_data_ready(struct sock *sk) 1499 { 1500 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1501 u16 state = 1 << inet_sk_state_load(sk); 1502 struct sock *parent = subflow->conn; 1503 struct mptcp_sock *msk; 1504 1505 trace_sk_data_ready(sk); 1506 1507 msk = mptcp_sk(parent); 1508 if (state & TCPF_LISTEN) { 1509 /* MPJ subflows are removed from the accept queue before reaching here, 1510 * avoid stray wakeups 1511 */ 1512 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) 1513 return; 1514 1515 parent->sk_data_ready(parent); 1516 return; 1517 } 1518 1519 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && 1520 !subflow->mp_join && !(state & TCPF_CLOSE)); 1521 1522 if (mptcp_subflow_data_available(sk)) { 1523 mptcp_data_ready(parent, sk); 1524 1525 /* subflow-level lowat tests are not relevant: 1526 * respect the msk-level threshold possibly mandating an immediate ack 1527 */ 1528 if (mptcp_data_avail(msk) < parent->sk_rcvlowat && 1529 (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss) 1530 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; 1531 } else if (unlikely(sk->sk_err)) { 1532 subflow_error_report(sk); 1533 } 1534 } 1535 1536 static void subflow_write_space(struct sock *ssk) 1537 { 1538 struct sock *sk = mptcp_subflow_ctx(ssk)->conn; 1539 1540 mptcp_propagate_sndbuf(sk, ssk); 1541 mptcp_write_space(sk); 1542 } 1543 1544 static const struct inet_connection_sock_af_ops * 1545 subflow_default_af_ops(struct sock *sk) 1546 { 1547 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1548 if (sk->sk_family == AF_INET6) 1549 return &subflow_v6_specific; 1550 #endif 1551 return &subflow_specific; 1552 } 1553 1554 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1555 void mptcpv6_handle_mapped(struct sock *sk, bool mapped) 1556 { 1557 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1558 struct inet_connection_sock *icsk = inet_csk(sk); 1559 const struct inet_connection_sock_af_ops *target; 1560 1561 target = mapped ? 
&subflow_v6m_specific : subflow_default_af_ops(sk); 1562 1563 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", 1564 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); 1565 1566 if (likely(icsk->icsk_af_ops == target)) 1567 return; 1568 1569 subflow->icsk_af_ops = icsk->icsk_af_ops; 1570 icsk->icsk_af_ops = target; 1571 } 1572 #endif 1573 1574 void mptcp_info2sockaddr(const struct mptcp_addr_info *info, 1575 struct sockaddr_storage *addr, 1576 unsigned short family) 1577 { 1578 memset(addr, 0, sizeof(*addr)); 1579 addr->ss_family = family; 1580 if (addr->ss_family == AF_INET) { 1581 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr; 1582 1583 if (info->family == AF_INET) 1584 in_addr->sin_addr = info->addr; 1585 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1586 else if (ipv6_addr_v4mapped(&info->addr6)) 1587 in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3]; 1588 #endif 1589 in_addr->sin_port = info->port; 1590 } 1591 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1592 else if (addr->ss_family == AF_INET6) { 1593 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr; 1594 1595 if (info->family == AF_INET) 1596 ipv6_addr_set_v4mapped(info->addr.s_addr, 1597 &in6_addr->sin6_addr); 1598 else 1599 in6_addr->sin6_addr = info->addr6; 1600 in6_addr->sin6_port = info->port; 1601 } 1602 #endif 1603 } 1604 1605 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local, 1606 const struct mptcp_addr_info *remote) 1607 { 1608 struct mptcp_sock *msk = mptcp_sk(sk); 1609 struct mptcp_subflow_context *subflow; 1610 int local_id = local->addr.id; 1611 struct sockaddr_storage addr; 1612 int remote_id = remote->id; 1613 int err = -ENOTCONN; 1614 struct socket *sf; 1615 struct sock *ssk; 1616 u32 remote_token; 1617 int addrlen; 1618 1619 /* The userspace PM sent the request too early? 
*/ 1620 if (!mptcp_is_fully_established(sk)) 1621 goto err_out; 1622 1623 err = mptcp_subflow_create_socket(sk, local->addr.family, &sf); 1624 if (err) { 1625 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCREATSKERR); 1626 pr_debug("msk=%p local=%d remote=%d create sock error: %d\n", 1627 msk, local_id, remote_id, err); 1628 goto err_out; 1629 } 1630 1631 ssk = sf->sk; 1632 subflow = mptcp_subflow_ctx(ssk); 1633 do { 1634 get_random_bytes(&subflow->local_nonce, sizeof(u32)); 1635 } while (!subflow->local_nonce); 1636 1637 /* if 'IPADDRANY', the ID will be set later, after the routing */ 1638 if (local->addr.family == AF_INET) { 1639 if (!local->addr.addr.s_addr) 1640 local_id = -1; 1641 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1642 } else if (sk->sk_family == AF_INET6) { 1643 if (ipv6_addr_any(&local->addr.addr6)) 1644 local_id = -1; 1645 #endif 1646 } 1647 1648 if (local_id >= 0) 1649 subflow_set_local_id(subflow, local_id); 1650 1651 subflow->remote_key_valid = 1; 1652 subflow->remote_key = READ_ONCE(msk->remote_key); 1653 subflow->local_key = READ_ONCE(msk->local_key); 1654 subflow->token = msk->token; 1655 mptcp_info2sockaddr(&local->addr, &addr, ssk->sk_family); 1656 1657 addrlen = sizeof(struct sockaddr_in); 1658 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1659 if (addr.ss_family == AF_INET6) 1660 addrlen = sizeof(struct sockaddr_in6); 1661 #endif 1662 ssk->sk_bound_dev_if = local->ifindex; 1663 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); 1664 if (err) { 1665 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXBINDERR); 1666 pr_debug("msk=%p local=%d remote=%d bind error: %d\n", 1667 msk, local_id, remote_id, err); 1668 goto failed; 1669 } 1670 1671 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); 1672 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk, 1673 remote_token, local_id, remote_id); 1674 subflow->remote_token = remote_token; 1675 WRITE_ONCE(subflow->remote_id, remote_id); 1676 subflow->request_join = 1; 1677 subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1678 subflow->subflow_id = msk->subflow_id++; 1679 mptcp_info2sockaddr(remote, &addr, ssk->sk_family); 1680 1681 sock_hold(ssk); 1682 list_add_tail(&subflow->node, &msk->conn_list); 1683 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); 1684 if (err && err != -EINPROGRESS) { 1685 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCONNECTERR); 1686 pr_debug("msk=%p local=%d remote=%d connect error: %d\n", 1687 msk, local_id, remote_id, err); 1688 goto failed_unlink; 1689 } 1690 1691 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTX); 1692 1693 /* discard the subflow socket */ 1694 mptcp_sock_graft(ssk, sk->sk_socket); 1695 iput(SOCK_INODE(sf)); 1696 mptcp_stop_tout_timer(sk); 1697 return 0; 1698 1699 failed_unlink: 1700 list_del(&subflow->node); 1701 sock_put(mptcp_subflow_tcp_sock(subflow)); 1702 1703 failed: 1704 subflow->disposable = 1; 1705 sock_release(sf); 1706 1707 err_out: 1708 /* we account subflows before the creation, and these failures will not 1709 * be caught by sk_state_change() 1710 */ 1711 mptcp_pm_close_subflow(msk); 1712 return err; 1713 } 1714 1715 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child) 1716 { 1717 #ifdef CONFIG_SOCK_CGROUP_DATA 1718 struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data, 1719 *child_skcd = &child->sk_cgrp_data; 1720 1721 /* only the additional subflows created by kworkers have to be modified */ 1722 if (cgroup_id(sock_cgroup_ptr(parent_skcd)) != 1723 
cgroup_id(sock_cgroup_ptr(child_skcd))) { 1724 #ifdef CONFIG_MEMCG 1725 struct mem_cgroup *memcg = parent->sk_memcg; 1726 1727 mem_cgroup_sk_free(child); 1728 if (memcg && css_tryget(&memcg->css)) 1729 child->sk_memcg = memcg; 1730 #endif /* CONFIG_MEMCG */ 1731 1732 cgroup_sk_free(child_skcd); 1733 *child_skcd = *parent_skcd; 1734 cgroup_sk_clone(child_skcd); 1735 } 1736 #endif /* CONFIG_SOCK_CGROUP_DATA */ 1737 } 1738 1739 static void mptcp_subflow_ops_override(struct sock *ssk) 1740 { 1741 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1742 if (ssk->sk_prot == &tcpv6_prot) 1743 ssk->sk_prot = &tcpv6_prot_override; 1744 else 1745 #endif 1746 ssk->sk_prot = &tcp_prot_override; 1747 } 1748 1749 static void mptcp_subflow_ops_undo_override(struct sock *ssk) 1750 { 1751 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1752 if (ssk->sk_prot == &tcpv6_prot_override) 1753 ssk->sk_prot = &tcpv6_prot; 1754 else 1755 #endif 1756 ssk->sk_prot = &tcp_prot; 1757 } 1758 1759 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, 1760 struct socket **new_sock) 1761 { 1762 struct mptcp_subflow_context *subflow; 1763 struct net *net = sock_net(sk); 1764 struct socket *sf; 1765 int err; 1766 1767 /* un-accepted server sockets can reach here - on bad configuration 1768 * bail early to avoid greater trouble later 1769 */ 1770 if (unlikely(!sk->sk_socket)) 1771 return -EINVAL; 1772 1773 err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf); 1774 if (err) 1775 return err; 1776 1777 lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING); 1778 1779 err = security_mptcp_add_subflow(sk, sf->sk); 1780 if (err) 1781 goto err_free; 1782 1783 /* the newly created socket has to be in the same cgroup as its parent */ 1784 mptcp_attach_cgroup(sk, sf->sk); 1785 1786 /* kernel sockets do not by default acquire net ref, but TCP timer 1787 * needs it. 1788 * Update ns_tracker to current stack trace and refcounted tracker. 1789 */ 1790 sk_net_refcnt_upgrade(sf->sk); 1791 err = tcp_set_ulp(sf->sk, "mptcp"); 1792 if (err) 1793 goto err_free; 1794 1795 mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk); 1796 release_sock(sf->sk); 1797 1798 /* the newly created socket really belongs to the owning MPTCP 1799 * socket, even if for additional subflows the allocation is performed 1800 * by a kernel workqueue. Adjust inode references, so that the 1801 * procfs/diag interfaces really show this one belonging to the correct 1802 * user. 
1803 */ 1804 SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino; 1805 SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid; 1806 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid; 1807 1808 subflow = mptcp_subflow_ctx(sf->sk); 1809 pr_debug("subflow=%p\n", subflow); 1810 1811 *new_sock = sf; 1812 sock_hold(sk); 1813 subflow->conn = sk; 1814 mptcp_subflow_ops_override(sf->sk); 1815 1816 return 0; 1817 1818 err_free: 1819 release_sock(sf->sk); 1820 sock_release(sf); 1821 return err; 1822 } 1823 1824 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk, 1825 gfp_t priority) 1826 { 1827 struct inet_connection_sock *icsk = inet_csk(sk); 1828 struct mptcp_subflow_context *ctx; 1829 1830 ctx = kzalloc(sizeof(*ctx), priority); 1831 if (!ctx) 1832 return NULL; 1833 1834 rcu_assign_pointer(icsk->icsk_ulp_data, ctx); 1835 INIT_LIST_HEAD(&ctx->node); 1836 INIT_LIST_HEAD(&ctx->delegated_node); 1837 1838 pr_debug("subflow=%p\n", ctx); 1839 1840 ctx->tcp_sock = sk; 1841 WRITE_ONCE(ctx->local_id, -1); 1842 1843 return ctx; 1844 } 1845 1846 static void __subflow_state_change(struct sock *sk) 1847 { 1848 struct socket_wq *wq; 1849 1850 rcu_read_lock(); 1851 wq = rcu_dereference(sk->sk_wq); 1852 if (skwq_has_sleeper(wq)) 1853 wake_up_interruptible_all(&wq->wait); 1854 rcu_read_unlock(); 1855 } 1856 1857 static void subflow_state_change(struct sock *sk) 1858 { 1859 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1860 struct sock *parent = subflow->conn; 1861 1862 __subflow_state_change(sk); 1863 1864 if (subflow_simultaneous_connect(sk)) { 1865 WARN_ON_ONCE(!mptcp_try_fallback(sk, MPTCP_MIB_SIMULTCONNFALLBACK)); 1866 subflow->conn_finished = 1; 1867 mptcp_propagate_state(parent, sk, subflow, NULL); 1868 } 1869 1870 /* as recvmsg() does not acquire the subflow socket for ssk selection 1871 * a fin packet carrying a DSS can be unnoticed if we don't trigger 1872 * the data available machinery here. 1873 */ 1874 if (mptcp_subflow_data_available(sk)) 1875 mptcp_data_ready(parent, sk); 1876 else if (unlikely(sk->sk_err)) 1877 subflow_error_report(sk); 1878 1879 subflow_sched_work_if_closed(mptcp_sk(parent), sk); 1880 } 1881 1882 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk) 1883 { 1884 struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue; 1885 struct request_sock *req, *head, *tail; 1886 struct mptcp_subflow_context *subflow; 1887 struct sock *sk, *ssk; 1888 1889 /* Due to lock dependencies no relevant lock can be acquired under rskq_lock. 1890 * Splice the req list, so that accept() can not reach the pending ssk after 1891 * the listener socket is released below. 
	 */
	spin_lock_bh(&queue->rskq_lock);
	head = queue->rskq_accept_head;
	tail = queue->rskq_accept_tail;
	queue->rskq_accept_head = NULL;
	queue->rskq_accept_tail = NULL;
	spin_unlock_bh(&queue->rskq_lock);

	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or will cause ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (req = head; req; req = req->dl_next) {
		ssk = req->sk;
		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		sk = subflow->conn;
		sock_hold(sk);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context and must be
		 * performed at socket disposal time, in atomic
		 * context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);

	/* restore the listener queue, to let the TCP code clean it up */
	spin_lock_bh(&queue->rskq_lock);
	WARN_ON_ONCE(queue->rskq_accept_head);
	queue->rskq_accept_head = head;
	queue->rskq_accept_tail = tail;
	spin_unlock_bh(&queue->rskq_lock);
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}
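
/* For reference: since subflow_ulp_init() rejects any socket not created
 * via sock_create_kern(), a direct userspace attempt to attach this ULP,
 * e.g.:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "mptcp", sizeof("mptcp"));
 *
 * is expected to fail with EOPNOTSUPP; the ULP is only attached internally,
 * via the tcp_set_ulp() call in mptcp_subflow_create_socket().
 */
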
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive, will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, id is always 0 */
		subflow_set_local_id(new_ctx, 0);
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		WRITE_ONCE(new_ctx->fully_established, true);
		new_ctx->remote_key_valid = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->request_bkup = subflow_req->request_bkup;
		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req id is valid, fetched via subflow_check_req()
		 * and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	long status;

	/* process and clear all the pending actions, but leave the subflow into
	 * the napi queue. To respect locking, only the same CPU that originated
	 * the action can touch the list. mptcp_napi_poll will take care of it.
	 */
	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
	if (status)
		mptcp_subflow_process_delegated(ssk, status);

	tcp_release_cb(ssk);
}

static int tcp_abort_override(struct sock *ssk, int err)
{
	/* closing a listener subflow requires a great deal of care.
	 * keep it simple and just prevent such operation
	 */
	if (inet_sk_state_load(ssk) == TCP_LISTEN)
		return -EINVAL;

	return tcp_abort(ssk, err);
}
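
/* For reference: the .diag_destroy hook that tcp_abort_override() is wired
 * into below is reached via the sock_diag SOCK_DESTROY operation (e.g.
 * "ss -K" from iproute2), so the TCP_LISTEN check above simply keeps
 * listener subflows out of that path.
 */
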
static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}

void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;
	tcp_prot_override.diag_destroy = tcp_abort_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. This should not change
	 * in the future, but better to make sure to be warned if it is no
	 * longer the case.
2158 */ 2159 BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock)); 2160 2161 mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops; 2162 mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6"; 2163 mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor; 2164 2165 if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0) 2166 panic("MPTCP: failed to init subflow v6 request sock ops\n"); 2167 2168 subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops; 2169 subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req; 2170 subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack; 2171 2172 subflow_v6_specific = ipv6_specific; 2173 subflow_v6_specific.conn_request = subflow_v6_conn_request; 2174 subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock; 2175 subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect; 2176 subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header; 2177 2178 subflow_v6m_specific = subflow_v6_specific; 2179 subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit; 2180 subflow_v6m_specific.send_check = ipv4_specific.send_check; 2181 subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len; 2182 subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced; 2183 subflow_v6m_specific.rebuild_header = subflow_rebuild_header; 2184 2185 tcpv6_prot_override = tcpv6_prot; 2186 tcpv6_prot_override.release_cb = tcp_release_cb_override; 2187 tcpv6_prot_override.diag_destroy = tcp_abort_override; 2188 #endif 2189 2190 mptcp_diag_subflow_init(&subflow_ulp_ops); 2191 2192 if (tcp_register_ulp(&subflow_ulp_ops) != 0) 2193 panic("MPTCP: failed to register subflows to ULP\n"); 2194 } 2195