1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2017 - 2019, Intel Corporation. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <crypto/sha2.h> 13 #include <crypto/utils.h> 14 #include <net/sock.h> 15 #include <net/inet_common.h> 16 #include <net/inet_hashtables.h> 17 #include <net/protocol.h> 18 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 19 #include <net/ip6_route.h> 20 #include <net/transp_v6.h> 21 #endif 22 #include <net/mptcp.h> 23 24 #include "protocol.h" 25 #include "mib.h" 26 27 #include <trace/events/mptcp.h> 28 #include <trace/events/sock.h> 29 30 static void mptcp_subflow_ops_undo_override(struct sock *ssk); 31 32 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req, 33 enum linux_mptcp_mib_field field) 34 { 35 MPTCP_INC_STATS(sock_net(req_to_sk(req)), field); 36 } 37 38 static void subflow_req_destructor(struct request_sock *req) 39 { 40 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 41 42 pr_debug("subflow_req=%p\n", subflow_req); 43 44 if (subflow_req->msk) 45 sock_put((struct sock *)subflow_req->msk); 46 47 mptcp_token_destroy_request(req); 48 } 49 50 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, 51 void *hmac) 52 { 53 u8 msg[8]; 54 55 put_unaligned_be32(nonce1, &msg[0]); 56 put_unaligned_be32(nonce2, &msg[4]); 57 58 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); 59 } 60 61 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk) 62 { 63 return mptcp_is_fully_established((void *)msk) && 64 ((mptcp_pm_is_userspace(msk) && 65 mptcp_userspace_pm_active(msk)) || 66 READ_ONCE(msk->pm.accept_subflow)); 67 } 68 69 /* validate received token and create truncated hmac and nonce for SYN-ACK */ 70 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req) 71 { 72 struct mptcp_sock *msk = subflow_req->msk; 73 u8 hmac[SHA256_DIGEST_SIZE]; 74 75 get_random_bytes(&subflow_req->local_nonce, sizeof(u32)); 76 77 subflow_generate_hmac(READ_ONCE(msk->local_key), 78 READ_ONCE(msk->remote_key), 79 subflow_req->local_nonce, 80 subflow_req->remote_nonce, hmac); 81 82 subflow_req->thmac = get_unaligned_be64(hmac); 83 } 84 85 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req) 86 { 87 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 88 struct mptcp_sock *msk; 89 int local_id; 90 91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token); 92 if (!msk) { 93 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN); 94 return NULL; 95 } 96 97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req); 98 if (local_id < 0) { 99 sock_put((struct sock *)msk); 100 return NULL; 101 } 102 subflow_req->local_id = local_id; 103 subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req); 104 105 return msk; 106 } 107 108 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener) 109 { 110 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 111 112 subflow_req->mp_capable = 0; 113 subflow_req->mp_join = 0; 114 subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener)); 115 subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener)); 116 subflow_req->msk = NULL; 117 mptcp_token_init_request(req); 118 } 119 120 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk) 121 { 122 return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport; 123 } 124 125 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason) 126 { 127 struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 128 129 if (mpext) { 130 memset(mpext, 0, sizeof(*mpext)); 131 mpext->reset_reason = reason; 132 } 133 } 134 135 static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb) 136 { 137 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT); 138 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 139 return -EPERM; 140 } 141 142 /* Init mptcp request socket. 143 * 144 * Returns an error code if a JOIN has failed and a TCP reset 145 * should be sent. 146 */ 147 static int subflow_check_req(struct request_sock *req, 148 const struct sock *sk_listener, 149 struct sk_buff *skb) 150 { 151 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); 152 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 153 struct mptcp_options_received mp_opt; 154 bool opt_mp_capable, opt_mp_join; 155 156 pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener); 157 158 #ifdef CONFIG_TCP_MD5SIG 159 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of 160 * TCP option space. 161 */ 162 if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) { 163 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); 164 return -EINVAL; 165 } 166 #endif 167 168 mptcp_get_options(skb, &mp_opt); 169 170 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN); 171 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN); 172 if (opt_mp_capable) { 173 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); 174 175 if (unlikely(listener->pm_listener)) 176 return subflow_reset_req_endp(req, skb); 177 if (opt_mp_join) 178 return 0; 179 } else if (opt_mp_join) { 180 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); 181 182 if (mp_opt.backup) 183 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX); 184 } else if (unlikely(listener->pm_listener)) { 185 return subflow_reset_req_endp(req, skb); 186 } 187 188 if (opt_mp_capable && listener->request_mptcp) { 189 int err, retries = MPTCP_TOKEN_MAX_RETRIES; 190 191 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; 192 again: 193 do { 194 get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key)); 195 } while (subflow_req->local_key == 0); 196 197 if (unlikely(req->syncookie)) { 198 mptcp_crypto_key_sha(subflow_req->local_key, 199 &subflow_req->token, 200 &subflow_req->idsn); 201 if (mptcp_token_exists(subflow_req->token)) { 202 if (retries-- > 0) 203 goto again; 204 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); 205 } else { 206 subflow_req->mp_capable = 1; 207 } 208 return 0; 209 } 210 211 err = mptcp_token_new_request(req); 212 if (err == 0) 213 subflow_req->mp_capable = 1; 214 else if (retries-- > 0) 215 goto again; 216 else 217 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); 218 219 } else if (opt_mp_join && listener->request_mptcp) { 220 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; 221 subflow_req->mp_join = 1; 222 subflow_req->backup = mp_opt.backup; 223 subflow_req->remote_id = mp_opt.join_id; 224 subflow_req->token = mp_opt.token; 225 subflow_req->remote_nonce = mp_opt.nonce; 226 subflow_req->msk = subflow_token_join_request(req); 227 228 /* Can't fall back to TCP in this case. */ 229 if (!subflow_req->msk) { 230 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); 231 return -EPERM; 232 } 233 234 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { 235 pr_debug("syn inet_sport=%d %d\n", 236 ntohs(inet_sk(sk_listener)->inet_sport), 237 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); 238 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { 239 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX); 240 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 241 return -EPERM; 242 } 243 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX); 244 } 245 246 subflow_req_create_thmac(subflow_req); 247 248 if (unlikely(req->syncookie)) { 249 if (!mptcp_can_accept_new_subflow(subflow_req->msk)) { 250 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 251 return -EPERM; 252 } 253 254 subflow_init_req_cookie_join_save(subflow_req, skb); 255 } 256 257 pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token, 258 subflow_req->remote_nonce, subflow_req->msk); 259 } 260 261 return 0; 262 } 263 264 int mptcp_subflow_init_cookie_req(struct request_sock *req, 265 const struct sock *sk_listener, 266 struct sk_buff *skb) 267 { 268 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); 269 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 270 struct mptcp_options_received mp_opt; 271 bool opt_mp_capable, opt_mp_join; 272 int err; 273 274 subflow_init_req(req, sk_listener); 275 mptcp_get_options(skb, &mp_opt); 276 277 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK); 278 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK); 279 if (opt_mp_capable && opt_mp_join) 280 return -EINVAL; 281 282 if (opt_mp_capable && listener->request_mptcp) { 283 if (mp_opt.sndr_key == 0) 284 return -EINVAL; 285 286 subflow_req->local_key = mp_opt.rcvr_key; 287 err = mptcp_token_new_request(req); 288 if (err) 289 return err; 290 291 subflow_req->mp_capable = 1; 292 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; 293 } else if (opt_mp_join && listener->request_mptcp) { 294 if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) 295 return -EINVAL; 296 297 subflow_req->mp_join = 1; 298 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; 299 } 300 301 return 0; 302 } 303 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req); 304 305 static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb) 306 { 307 const struct mptcp_ext *mpext = mptcp_get_ext(skb); 308 309 if (!mpext) 310 return SK_RST_REASON_NOT_SPECIFIED; 311 312 return sk_rst_convert_mptcp_reason(mpext->reset_reason); 313 } 314 315 static struct dst_entry *subflow_v4_route_req(const struct sock *sk, 316 struct sk_buff *skb, 317 struct flowi *fl, 318 struct request_sock *req, 319 u32 tw_isn) 320 { 321 struct dst_entry *dst; 322 int err; 323 324 tcp_rsk(req)->is_mptcp = 1; 325 subflow_init_req(req, sk); 326 327 dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn); 328 if (!dst) 329 return NULL; 330 331 err = subflow_check_req(req, sk, skb); 332 if (err == 0) 333 return dst; 334 335 dst_release(dst); 336 if (!req->syncookie) 337 tcp_request_sock_ops.send_reset(sk, skb, 338 mptcp_get_rst_reason(skb)); 339 return NULL; 340 } 341 342 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req, 343 struct tcp_fastopen_cookie *foc, 344 enum tcp_synack_type synack_type) 345 { 346 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 347 struct inet_request_sock *ireq = inet_rsk(req); 348 349 /* clear tstamp_ok, as needed depending on cookie */ 350 if (foc && foc->len > -1) 351 ireq->tstamp_ok = 0; 352 353 if (synack_type == TCP_SYNACK_FASTOPEN) 354 mptcp_fastopen_subflow_synack_set_params(subflow, req); 355 } 356 357 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst, 358 struct flowi *fl, 359 struct request_sock *req, 360 struct tcp_fastopen_cookie *foc, 361 enum tcp_synack_type synack_type, 362 struct sk_buff *syn_skb) 363 { 364 subflow_prep_synack(sk, req, foc, synack_type); 365 366 return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc, 367 synack_type, syn_skb); 368 } 369 370 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 371 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst, 372 struct flowi *fl, 373 struct request_sock *req, 374 struct tcp_fastopen_cookie *foc, 375 enum tcp_synack_type synack_type, 376 struct sk_buff *syn_skb) 377 { 378 subflow_prep_synack(sk, req, foc, synack_type); 379 380 return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc, 381 synack_type, syn_skb); 382 } 383 384 static struct dst_entry *subflow_v6_route_req(const struct sock *sk, 385 struct sk_buff *skb, 386 struct flowi *fl, 387 struct request_sock *req, 388 u32 tw_isn) 389 { 390 struct dst_entry *dst; 391 int err; 392 393 tcp_rsk(req)->is_mptcp = 1; 394 subflow_init_req(req, sk); 395 396 dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn); 397 if (!dst) 398 return NULL; 399 400 err = subflow_check_req(req, sk, skb); 401 if (err == 0) 402 return dst; 403 404 dst_release(dst); 405 if (!req->syncookie) 406 tcp6_request_sock_ops.send_reset(sk, skb, 407 mptcp_get_rst_reason(skb)); 408 return NULL; 409 } 410 #endif 411 412 /* validate received truncated hmac and create hmac for third ACK */ 413 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) 414 { 415 u8 hmac[SHA256_DIGEST_SIZE]; 416 u64 thmac; 417 418 subflow_generate_hmac(subflow->remote_key, subflow->local_key, 419 subflow->remote_nonce, subflow->local_nonce, 420 hmac); 421 422 thmac = get_unaligned_be64(hmac); 423 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n", 424 subflow, subflow->token, thmac, subflow->thmac); 425 426 return thmac == subflow->thmac; 427 } 428 429 void mptcp_subflow_reset(struct sock *ssk) 430 { 431 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 432 struct sock *sk = subflow->conn; 433 434 /* mptcp_mp_fail_no_response() can reach here on an already closed 435 * socket 436 */ 437 if (ssk->sk_state == TCP_CLOSE) 438 return; 439 440 /* must hold: tcp_done() could drop last reference on parent */ 441 sock_hold(sk); 442 443 mptcp_send_active_reset_reason(ssk); 444 tcp_done(ssk); 445 if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags)) 446 mptcp_schedule_work(sk); 447 448 sock_put(sk); 449 } 450 451 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk) 452 { 453 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport; 454 } 455 456 void __mptcp_sync_state(struct sock *sk, int state) 457 { 458 struct mptcp_subflow_context *subflow; 459 struct mptcp_sock *msk = mptcp_sk(sk); 460 struct sock *ssk = msk->first; 461 462 subflow = mptcp_subflow_ctx(ssk); 463 __mptcp_propagate_sndbuf(sk, ssk); 464 if (!msk->rcvspace_init) 465 mptcp_rcv_space_init(msk, ssk); 466 467 if (sk->sk_state == TCP_SYN_SENT) { 468 /* subflow->idsn is always available is TCP_SYN_SENT state, 469 * even for the FASTOPEN scenarios 470 */ 471 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); 472 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 473 mptcp_set_state(sk, state); 474 sk->sk_state_change(sk); 475 } 476 } 477 478 static void subflow_set_remote_key(struct mptcp_sock *msk, 479 struct mptcp_subflow_context *subflow, 480 const struct mptcp_options_received *mp_opt) 481 { 482 /* active MPC subflow will reach here multiple times: 483 * at subflow_finish_connect() time and at 4th ack time 484 */ 485 if (subflow->remote_key_valid) 486 return; 487 488 subflow->remote_key_valid = 1; 489 subflow->remote_key = mp_opt->sndr_key; 490 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn); 491 subflow->iasn++; 492 493 WRITE_ONCE(msk->remote_key, subflow->remote_key); 494 WRITE_ONCE(msk->ack_seq, subflow->iasn); 495 WRITE_ONCE(msk->can_ack, true); 496 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); 497 } 498 499 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk, 500 struct mptcp_subflow_context *subflow, 501 const struct mptcp_options_received *mp_opt) 502 { 503 struct mptcp_sock *msk = mptcp_sk(sk); 504 505 mptcp_data_lock(sk); 506 if (mp_opt) { 507 /* Options are available only in the non fallback cases 508 * avoid updating rx path fields otherwise 509 */ 510 WRITE_ONCE(msk->snd_una, subflow->idsn + 1); 511 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); 512 subflow_set_remote_key(msk, subflow, mp_opt); 513 } 514 515 if (!sock_owned_by_user(sk)) { 516 __mptcp_sync_state(sk, ssk->sk_state); 517 } else { 518 msk->pending_state = ssk->sk_state; 519 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags); 520 } 521 mptcp_data_unlock(sk); 522 } 523 524 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) 525 { 526 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 527 struct mptcp_options_received mp_opt; 528 struct sock *parent = subflow->conn; 529 struct mptcp_sock *msk; 530 531 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); 532 533 /* be sure no special action on any packet other than syn-ack */ 534 if (subflow->conn_finished) 535 return; 536 537 msk = mptcp_sk(parent); 538 subflow->rel_write_seq = 1; 539 subflow->conn_finished = 1; 540 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; 541 pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); 542 543 mptcp_get_options(skb, &mp_opt); 544 if (subflow->request_mptcp) { 545 if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) { 546 MPTCP_INC_STATS(sock_net(sk), 547 MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); 548 mptcp_do_fallback(sk); 549 pr_fallback(msk); 550 goto fallback; 551 } 552 553 if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD) 554 WRITE_ONCE(msk->csum_enabled, true); 555 if (mp_opt.deny_join_id0) 556 WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 557 subflow->mp_capable = 1; 558 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK); 559 mptcp_finish_connect(sk); 560 mptcp_active_enable(parent); 561 mptcp_propagate_state(parent, sk, subflow, &mp_opt); 562 } else if (subflow->request_join) { 563 u8 hmac[SHA256_DIGEST_SIZE]; 564 565 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) { 566 subflow->reset_reason = MPTCP_RST_EMPTCP; 567 goto do_reset; 568 } 569 570 subflow->backup = mp_opt.backup; 571 subflow->thmac = mp_opt.thmac; 572 subflow->remote_nonce = mp_opt.nonce; 573 WRITE_ONCE(subflow->remote_id, mp_opt.join_id); 574 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", 575 subflow, subflow->thmac, subflow->remote_nonce, 576 subflow->backup); 577 578 if (!subflow_thmac_valid(subflow)) { 579 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC); 580 subflow->reset_reason = MPTCP_RST_EMPTCP; 581 goto do_reset; 582 } 583 584 if (!mptcp_finish_join(sk)) 585 goto do_reset; 586 587 subflow_generate_hmac(subflow->local_key, subflow->remote_key, 588 subflow->local_nonce, 589 subflow->remote_nonce, 590 hmac); 591 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); 592 593 subflow->mp_join = 1; 594 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); 595 596 if (subflow->backup) 597 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); 598 599 if (subflow_use_different_dport(msk, sk)) { 600 pr_debug("synack inet_dport=%d %d\n", 601 ntohs(inet_sk(sk)->inet_dport), 602 ntohs(inet_sk(parent)->inet_dport)); 603 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX); 604 } 605 } else if (mptcp_check_fallback(sk)) { 606 /* It looks like MPTCP is blocked, while TCP is not */ 607 if (subflow->mpc_drop) 608 mptcp_active_disable(parent); 609 fallback: 610 mptcp_propagate_state(parent, sk, subflow, NULL); 611 } 612 return; 613 614 do_reset: 615 subflow->reset_transient = 0; 616 mptcp_subflow_reset(sk); 617 } 618 619 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id) 620 { 621 WARN_ON_ONCE(local_id < 0 || local_id > 255); 622 WRITE_ONCE(subflow->local_id, local_id); 623 } 624 625 static int subflow_chk_local_id(struct sock *sk) 626 { 627 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 628 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 629 int err; 630 631 if (likely(subflow->local_id >= 0)) 632 return 0; 633 634 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk); 635 if (err < 0) 636 return err; 637 638 subflow_set_local_id(subflow, err); 639 subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); 640 641 return 0; 642 } 643 644 static int subflow_rebuild_header(struct sock *sk) 645 { 646 int err = subflow_chk_local_id(sk); 647 648 if (unlikely(err < 0)) 649 return err; 650 651 return inet_sk_rebuild_header(sk); 652 } 653 654 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 655 static int subflow_v6_rebuild_header(struct sock *sk) 656 { 657 int err = subflow_chk_local_id(sk); 658 659 if (unlikely(err < 0)) 660 return err; 661 662 return inet6_sk_rebuild_header(sk); 663 } 664 #endif 665 666 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init; 667 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init; 668 669 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) 670 { 671 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 672 673 pr_debug("subflow=%p\n", subflow); 674 675 /* Never answer to SYNs sent to broadcast or multicast */ 676 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 677 goto drop; 678 679 return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops, 680 &subflow_request_sock_ipv4_ops, 681 sk, skb); 682 drop: 683 tcp_listendrop(sk); 684 return 0; 685 } 686 687 static void subflow_v4_req_destructor(struct request_sock *req) 688 { 689 subflow_req_destructor(req); 690 tcp_request_sock_ops.destructor(req); 691 } 692 693 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 694 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init; 695 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; 696 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; 697 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init; 698 static struct proto tcpv6_prot_override __ro_after_init; 699 700 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) 701 { 702 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 703 704 pr_debug("subflow=%p\n", subflow); 705 706 if (skb->protocol == htons(ETH_P_IP)) 707 return subflow_v4_conn_request(sk, skb); 708 709 if (!ipv6_unicast_destination(skb)) 710 goto drop; 711 712 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { 713 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); 714 return 0; 715 } 716 717 return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops, 718 &subflow_request_sock_ipv6_ops, sk, skb); 719 720 drop: 721 tcp_listendrop(sk); 722 return 0; /* don't send reset */ 723 } 724 725 static void subflow_v6_req_destructor(struct request_sock *req) 726 { 727 subflow_req_destructor(req); 728 tcp6_request_sock_ops.destructor(req); 729 } 730 #endif 731 732 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, 733 struct sock *sk_listener, 734 bool attach_listener) 735 { 736 if (ops->family == AF_INET) 737 ops = &mptcp_subflow_v4_request_sock_ops; 738 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 739 else if (ops->family == AF_INET6) 740 ops = &mptcp_subflow_v6_request_sock_ops; 741 #endif 742 743 return inet_reqsk_alloc(ops, sk_listener, attach_listener); 744 } 745 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc); 746 747 /* validate hmac received in third ACK */ 748 static bool subflow_hmac_valid(const struct request_sock *req, 749 const struct mptcp_options_received *mp_opt) 750 { 751 const struct mptcp_subflow_request_sock *subflow_req; 752 u8 hmac[SHA256_DIGEST_SIZE]; 753 struct mptcp_sock *msk; 754 755 subflow_req = mptcp_subflow_rsk(req); 756 msk = subflow_req->msk; 757 if (!msk) 758 return false; 759 760 subflow_generate_hmac(READ_ONCE(msk->remote_key), 761 READ_ONCE(msk->local_key), 762 subflow_req->remote_nonce, 763 subflow_req->local_nonce, hmac); 764 765 return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN); 766 } 767 768 static void subflow_ulp_fallback(struct sock *sk, 769 struct mptcp_subflow_context *old_ctx) 770 { 771 struct inet_connection_sock *icsk = inet_csk(sk); 772 773 mptcp_subflow_tcp_fallback(sk, old_ctx); 774 icsk->icsk_ulp_ops = NULL; 775 rcu_assign_pointer(icsk->icsk_ulp_data, NULL); 776 tcp_sk(sk)->is_mptcp = 0; 777 778 mptcp_subflow_ops_undo_override(sk); 779 } 780 781 void mptcp_subflow_drop_ctx(struct sock *ssk) 782 { 783 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); 784 785 if (!ctx) 786 return; 787 788 list_del(&mptcp_subflow_ctx(ssk)->node); 789 if (inet_csk(ssk)->icsk_ulp_ops) { 790 subflow_ulp_fallback(ssk, ctx); 791 if (ctx->conn) 792 sock_put(ctx->conn); 793 } 794 795 kfree_rcu(ctx, rcu); 796 } 797 798 void __mptcp_subflow_fully_established(struct mptcp_sock *msk, 799 struct mptcp_subflow_context *subflow, 800 const struct mptcp_options_received *mp_opt) 801 { 802 subflow_set_remote_key(msk, subflow, mp_opt); 803 WRITE_ONCE(subflow->fully_established, true); 804 WRITE_ONCE(msk->fully_established, true); 805 806 if (subflow->is_mptfo) 807 __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt); 808 } 809 810 static struct sock *subflow_syn_recv_sock(const struct sock *sk, 811 struct sk_buff *skb, 812 struct request_sock *req, 813 struct dst_entry *dst, 814 struct request_sock *req_unhash, 815 bool *own_req) 816 { 817 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk); 818 struct mptcp_subflow_request_sock *subflow_req; 819 struct mptcp_options_received mp_opt; 820 bool fallback, fallback_is_fatal; 821 enum sk_rst_reason reason; 822 struct mptcp_sock *owner; 823 struct sock *child; 824 825 pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn); 826 827 /* After child creation we must look for MPC even when options 828 * are not parsed 829 */ 830 mp_opt.suboptions = 0; 831 832 /* hopefully temporary handling for MP_JOIN+syncookie */ 833 subflow_req = mptcp_subflow_rsk(req); 834 fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join; 835 fallback = !tcp_rsk(req)->is_mptcp; 836 if (fallback) 837 goto create_child; 838 839 /* if the sk is MP_CAPABLE, we try to fetch the client key */ 840 if (subflow_req->mp_capable) { 841 /* we can receive and accept an in-window, out-of-order pkt, 842 * which may not carry the MP_CAPABLE opt even on mptcp enabled 843 * paths: always try to extract the peer key, and fallback 844 * for packets missing it. 845 * Even OoO DSS packets coming legitly after dropped or 846 * reordered MPC will cause fallback, but we don't have other 847 * options. 848 */ 849 mptcp_get_options(skb, &mp_opt); 850 if (!(mp_opt.suboptions & 851 (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK))) 852 fallback = true; 853 854 } else if (subflow_req->mp_join) { 855 mptcp_get_options(skb, &mp_opt); 856 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) || 857 !subflow_hmac_valid(req, &mp_opt) || 858 !mptcp_can_accept_new_subflow(subflow_req->msk)) { 859 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); 860 fallback = true; 861 } 862 } 863 864 create_child: 865 child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, 866 req_unhash, own_req); 867 868 if (child && *own_req) { 869 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child); 870 871 tcp_rsk(req)->drop_req = false; 872 873 /* we need to fallback on ctx allocation failure and on pre-reqs 874 * checking above. In the latter scenario we additionally need 875 * to reset the context to non MPTCP status. 876 */ 877 if (!ctx || fallback) { 878 if (fallback_is_fatal) { 879 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); 880 goto dispose_child; 881 } 882 goto fallback; 883 } 884 885 /* ssk inherits options of listener sk */ 886 ctx->setsockopt_seq = listener->setsockopt_seq; 887 888 if (ctx->mp_capable) { 889 ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req); 890 if (!ctx->conn) 891 goto fallback; 892 893 ctx->subflow_id = 1; 894 owner = mptcp_sk(ctx->conn); 895 mptcp_pm_new_connection(owner, child, 1); 896 897 /* with OoO packets we can reach here without ingress 898 * mpc option 899 */ 900 if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) { 901 mptcp_pm_fully_established(owner, child); 902 ctx->pm_notified = 1; 903 } 904 } else if (ctx->mp_join) { 905 owner = subflow_req->msk; 906 if (!owner) { 907 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 908 goto dispose_child; 909 } 910 911 /* move the msk reference ownership to the subflow */ 912 subflow_req->msk = NULL; 913 ctx->conn = (struct sock *)owner; 914 915 if (subflow_use_different_sport(owner, sk)) { 916 pr_debug("ack inet_sport=%d %d\n", 917 ntohs(inet_sk(sk)->inet_sport), 918 ntohs(inet_sk((struct sock *)owner)->inet_sport)); 919 if (!mptcp_pm_sport_in_anno_list(owner, sk)) { 920 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX); 921 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); 922 goto dispose_child; 923 } 924 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX); 925 } 926 927 if (!mptcp_finish_join(child)) { 928 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child); 929 930 subflow_add_reset_reason(skb, subflow->reset_reason); 931 goto dispose_child; 932 } 933 934 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX); 935 tcp_rsk(req)->drop_req = true; 936 } 937 } 938 939 /* check for expected invariant - should never trigger, just help 940 * catching earlier subtle bugs 941 */ 942 WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp && 943 (!mptcp_subflow_ctx(child) || 944 !mptcp_subflow_ctx(child)->conn)); 945 return child; 946 947 dispose_child: 948 mptcp_subflow_drop_ctx(child); 949 tcp_rsk(req)->drop_req = true; 950 inet_csk_prepare_for_destroy_sock(child); 951 tcp_done(child); 952 reason = mptcp_get_rst_reason(skb); 953 req->rsk_ops->send_reset(sk, skb, reason); 954 955 /* The last child reference will be released by the caller */ 956 return child; 957 958 fallback: 959 if (fallback) 960 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); 961 mptcp_subflow_drop_ctx(child); 962 return child; 963 } 964 965 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; 966 static struct proto tcp_prot_override __ro_after_init; 967 968 enum mapping_status { 969 MAPPING_OK, 970 MAPPING_INVALID, 971 MAPPING_EMPTY, 972 MAPPING_DATA_FIN, 973 MAPPING_DUMMY, 974 MAPPING_BAD_CSUM, 975 MAPPING_NODSS 976 }; 977 978 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) 979 { 980 pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n", 981 ssn, subflow->map_subflow_seq, subflow->map_data_len); 982 } 983 984 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb) 985 { 986 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 987 unsigned int skb_consumed; 988 989 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq; 990 if (unlikely(skb_consumed >= skb->len)) { 991 DEBUG_NET_WARN_ON_ONCE(1); 992 return true; 993 } 994 995 return skb->len - skb_consumed <= subflow->map_data_len - 996 mptcp_subflow_get_map_offset(subflow); 997 } 998 999 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb) 1000 { 1001 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1002 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; 1003 1004 if (unlikely(before(ssn, subflow->map_subflow_seq))) { 1005 /* Mapping covers data later in the subflow stream, 1006 * currently unsupported. 1007 */ 1008 dbg_bad_map(subflow, ssn); 1009 return false; 1010 } 1011 if (unlikely(!before(ssn, subflow->map_subflow_seq + 1012 subflow->map_data_len))) { 1013 /* Mapping does covers past subflow data, invalid */ 1014 dbg_bad_map(subflow, ssn); 1015 return false; 1016 } 1017 return true; 1018 } 1019 1020 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb, 1021 bool csum_reqd) 1022 { 1023 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1024 u32 offset, seq, delta; 1025 __sum16 csum; 1026 int len; 1027 1028 if (!csum_reqd) 1029 return MAPPING_OK; 1030 1031 /* mapping already validated on previous traversal */ 1032 if (subflow->map_csum_len == subflow->map_data_len) 1033 return MAPPING_OK; 1034 1035 /* traverse the receive queue, ensuring it contains a full 1036 * DSS mapping and accumulating the related csum. 1037 * Preserve the accoumlate csum across multiple calls, to compute 1038 * the csum only once 1039 */ 1040 delta = subflow->map_data_len - subflow->map_csum_len; 1041 for (;;) { 1042 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; 1043 offset = seq - TCP_SKB_CB(skb)->seq; 1044 1045 /* if the current skb has not been accounted yet, csum its contents 1046 * up to the amount covered by the current DSS 1047 */ 1048 if (offset < skb->len) { 1049 __wsum csum; 1050 1051 len = min(skb->len - offset, delta); 1052 csum = skb_checksum(skb, offset, len, 0); 1053 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum, 1054 subflow->map_csum_len); 1055 1056 delta -= len; 1057 subflow->map_csum_len += len; 1058 } 1059 if (delta == 0) 1060 break; 1061 1062 if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) { 1063 /* if this subflow is closed, the partial mapping 1064 * will be never completed; flush the pending skbs, so 1065 * that subflow_sched_work_if_closed() can kick in 1066 */ 1067 if (unlikely(ssk->sk_state == TCP_CLOSE)) 1068 while ((skb = skb_peek(&ssk->sk_receive_queue))) 1069 sk_eat_skb(ssk, skb); 1070 1071 /* not enough data to validate the csum */ 1072 return MAPPING_EMPTY; 1073 } 1074 1075 /* the DSS mapping for next skbs will be validated later, 1076 * when a get_mapping_status call will process such skb 1077 */ 1078 skb = skb->next; 1079 } 1080 1081 /* note that 'map_data_len' accounts only for the carried data, does 1082 * not include the eventual seq increment due to the data fin, 1083 * while the pseudo header requires the original DSS data len, 1084 * including that 1085 */ 1086 csum = __mptcp_make_csum(subflow->map_seq, 1087 subflow->map_subflow_seq, 1088 subflow->map_data_len + subflow->map_data_fin, 1089 subflow->map_data_csum); 1090 if (unlikely(csum)) { 1091 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR); 1092 return MAPPING_BAD_CSUM; 1093 } 1094 1095 subflow->valid_csum_seen = 1; 1096 return MAPPING_OK; 1097 } 1098 1099 static enum mapping_status get_mapping_status(struct sock *ssk, 1100 struct mptcp_sock *msk) 1101 { 1102 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1103 bool csum_reqd = READ_ONCE(msk->csum_enabled); 1104 struct mptcp_ext *mpext; 1105 struct sk_buff *skb; 1106 u16 data_len; 1107 u64 map_seq; 1108 1109 skb = skb_peek(&ssk->sk_receive_queue); 1110 if (!skb) 1111 return MAPPING_EMPTY; 1112 1113 if (mptcp_check_fallback(ssk)) 1114 return MAPPING_DUMMY; 1115 1116 mpext = mptcp_get_ext(skb); 1117 if (!mpext || !mpext->use_map) { 1118 if (!subflow->map_valid && !skb->len) { 1119 /* the TCP stack deliver 0 len FIN pkt to the receive 1120 * queue, that is the only 0len pkts ever expected here, 1121 * and we can admit no mapping only for 0 len pkts 1122 */ 1123 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1124 WARN_ONCE(1, "0len seq %d:%d flags %x", 1125 TCP_SKB_CB(skb)->seq, 1126 TCP_SKB_CB(skb)->end_seq, 1127 TCP_SKB_CB(skb)->tcp_flags); 1128 sk_eat_skb(ssk, skb); 1129 return MAPPING_EMPTY; 1130 } 1131 1132 /* If the required DSS has likely been dropped by a middlebox */ 1133 if (!subflow->map_valid) 1134 return MAPPING_NODSS; 1135 1136 goto validate_seq; 1137 } 1138 1139 trace_get_mapping_status(mpext); 1140 1141 data_len = mpext->data_len; 1142 if (data_len == 0) { 1143 pr_debug("infinite mapping received\n"); 1144 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); 1145 subflow->map_data_len = 0; 1146 return MAPPING_INVALID; 1147 } 1148 1149 if (mpext->data_fin == 1) { 1150 u64 data_fin_seq; 1151 1152 if (data_len == 1) { 1153 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq, 1154 mpext->dsn64); 1155 pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq); 1156 if (subflow->map_valid) { 1157 /* A DATA_FIN might arrive in a DSS 1158 * option before the previous mapping 1159 * has been fully consumed. Continue 1160 * handling the existing mapping. 1161 */ 1162 skb_ext_del(skb, SKB_EXT_MPTCP); 1163 return MAPPING_OK; 1164 } 1165 1166 if (updated) 1167 mptcp_schedule_work((struct sock *)msk); 1168 1169 return MAPPING_DATA_FIN; 1170 } 1171 1172 data_fin_seq = mpext->data_seq + data_len - 1; 1173 1174 /* If mpext->data_seq is a 32-bit value, data_fin_seq must also 1175 * be limited to 32 bits. 1176 */ 1177 if (!mpext->dsn64) 1178 data_fin_seq &= GENMASK_ULL(31, 0); 1179 1180 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64); 1181 pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n", 1182 data_fin_seq, mpext->dsn64); 1183 1184 /* Adjust for DATA_FIN using 1 byte of sequence space */ 1185 data_len--; 1186 } 1187 1188 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64); 1189 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); 1190 1191 if (subflow->map_valid) { 1192 /* Allow replacing only with an identical map */ 1193 if (subflow->map_seq == map_seq && 1194 subflow->map_subflow_seq == mpext->subflow_seq && 1195 subflow->map_data_len == data_len && 1196 subflow->map_csum_reqd == mpext->csum_reqd) { 1197 skb_ext_del(skb, SKB_EXT_MPTCP); 1198 goto validate_csum; 1199 } 1200 1201 /* If this skb data are fully covered by the current mapping, 1202 * the new map would need caching, which is not supported 1203 */ 1204 if (skb_is_fully_mapped(ssk, skb)) { 1205 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH); 1206 return MAPPING_INVALID; 1207 } 1208 1209 /* will validate the next map after consuming the current one */ 1210 goto validate_csum; 1211 } 1212 1213 subflow->map_seq = map_seq; 1214 subflow->map_subflow_seq = mpext->subflow_seq; 1215 subflow->map_data_len = data_len; 1216 subflow->map_valid = 1; 1217 subflow->map_data_fin = mpext->data_fin; 1218 subflow->mpc_map = mpext->mpc_map; 1219 subflow->map_csum_reqd = mpext->csum_reqd; 1220 subflow->map_csum_len = 0; 1221 subflow->map_data_csum = csum_unfold(mpext->csum); 1222 1223 /* Cfr RFC 8684 Section 3.3.0 */ 1224 if (unlikely(subflow->map_csum_reqd != csum_reqd)) 1225 return MAPPING_INVALID; 1226 1227 pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", 1228 subflow->map_seq, subflow->map_subflow_seq, 1229 subflow->map_data_len, subflow->map_csum_reqd, 1230 subflow->map_data_csum); 1231 1232 validate_seq: 1233 /* we revalidate valid mapping on new skb, because we must ensure 1234 * the current skb is completely covered by the available mapping 1235 */ 1236 if (!validate_mapping(ssk, skb)) { 1237 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH); 1238 return MAPPING_INVALID; 1239 } 1240 1241 skb_ext_del(skb, SKB_EXT_MPTCP); 1242 1243 validate_csum: 1244 return validate_data_csum(ssk, skb, csum_reqd); 1245 } 1246 1247 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, 1248 u64 limit) 1249 { 1250 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1251 bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 1252 struct tcp_sock *tp = tcp_sk(ssk); 1253 u32 offset, incr, avail_len; 1254 1255 offset = tp->copied_seq - TCP_SKB_CB(skb)->seq; 1256 if (WARN_ON_ONCE(offset > skb->len)) 1257 goto out; 1258 1259 avail_len = skb->len - offset; 1260 incr = limit >= avail_len ? avail_len + fin : limit; 1261 1262 pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len, 1263 offset, subflow->map_subflow_seq); 1264 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); 1265 tcp_sk(ssk)->copied_seq += incr; 1266 1267 out: 1268 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) 1269 sk_eat_skb(ssk, skb); 1270 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) 1271 subflow->map_valid = 0; 1272 } 1273 1274 /* sched mptcp worker to remove the subflow if no more data is pending */ 1275 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) 1276 { 1277 struct sock *sk = (struct sock *)msk; 1278 1279 if (likely(ssk->sk_state != TCP_CLOSE && 1280 (ssk->sk_state != TCP_CLOSE_WAIT || 1281 inet_sk_state_load(sk) != TCP_ESTABLISHED))) 1282 return; 1283 1284 if (skb_queue_empty(&ssk->sk_receive_queue) && 1285 !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) 1286 mptcp_schedule_work(sk); 1287 } 1288 1289 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) 1290 { 1291 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 1292 1293 if (subflow->mp_join) 1294 return false; 1295 else if (READ_ONCE(msk->csum_enabled)) 1296 return !subflow->valid_csum_seen; 1297 else 1298 return READ_ONCE(msk->allow_infinite_fallback); 1299 } 1300 1301 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) 1302 { 1303 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1304 unsigned long fail_tout; 1305 1306 /* graceful failure can happen only on the MPC subflow */ 1307 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first))) 1308 return; 1309 1310 /* since the close timeout take precedence on the fail one, 1311 * no need to start the latter when the first is already set 1312 */ 1313 if (sock_flag((struct sock *)msk, SOCK_DEAD)) 1314 return; 1315 1316 /* we don't need extreme accuracy here, use a zero fail_tout as special 1317 * value meaning no fail timeout at all; 1318 */ 1319 fail_tout = jiffies + TCP_RTO_MAX; 1320 if (!fail_tout) 1321 fail_tout = 1; 1322 WRITE_ONCE(subflow->fail_tout, fail_tout); 1323 tcp_send_ack(ssk); 1324 1325 mptcp_reset_tout_timer(msk, subflow->fail_tout); 1326 } 1327 1328 static bool subflow_check_data_avail(struct sock *ssk) 1329 { 1330 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1331 enum mapping_status status; 1332 struct mptcp_sock *msk; 1333 struct sk_buff *skb; 1334 1335 if (!skb_peek(&ssk->sk_receive_queue)) 1336 WRITE_ONCE(subflow->data_avail, false); 1337 if (subflow->data_avail) 1338 return true; 1339 1340 msk = mptcp_sk(subflow->conn); 1341 for (;;) { 1342 u64 ack_seq; 1343 u64 old_ack; 1344 1345 status = get_mapping_status(ssk, msk); 1346 trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue)); 1347 if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY || 1348 status == MAPPING_BAD_CSUM || status == MAPPING_NODSS)) 1349 goto fallback; 1350 1351 if (status != MAPPING_OK) 1352 goto no_data; 1353 1354 skb = skb_peek(&ssk->sk_receive_queue); 1355 if (WARN_ON_ONCE(!skb)) 1356 goto no_data; 1357 1358 if (unlikely(!READ_ONCE(msk->can_ack))) 1359 goto fallback; 1360 1361 old_ack = READ_ONCE(msk->ack_seq); 1362 ack_seq = mptcp_subflow_get_mapped_dsn(subflow); 1363 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, 1364 ack_seq); 1365 if (unlikely(before64(ack_seq, old_ack))) { 1366 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); 1367 continue; 1368 } 1369 1370 WRITE_ONCE(subflow->data_avail, true); 1371 break; 1372 } 1373 return true; 1374 1375 no_data: 1376 subflow_sched_work_if_closed(msk, ssk); 1377 return false; 1378 1379 fallback: 1380 if (!__mptcp_check_fallback(msk)) { 1381 /* RFC 8684 section 3.7. */ 1382 if (status == MAPPING_BAD_CSUM && 1383 (subflow->mp_join || subflow->valid_csum_seen)) { 1384 subflow->send_mp_fail = 1; 1385 1386 if (!READ_ONCE(msk->allow_infinite_fallback)) { 1387 subflow->reset_transient = 0; 1388 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX; 1389 goto reset; 1390 } 1391 mptcp_subflow_fail(msk, ssk); 1392 WRITE_ONCE(subflow->data_avail, true); 1393 return true; 1394 } 1395 1396 if (!subflow_can_fallback(subflow) && subflow->map_data_len) { 1397 /* fatal protocol error, close the socket. 1398 * subflow_error_report() will introduce the appropriate barriers 1399 */ 1400 subflow->reset_transient = 0; 1401 subflow->reset_reason = status == MAPPING_NODSS ? 1402 MPTCP_RST_EMIDDLEBOX : 1403 MPTCP_RST_EMPTCP; 1404 1405 reset: 1406 WRITE_ONCE(ssk->sk_err, EBADMSG); 1407 tcp_set_state(ssk, TCP_CLOSE); 1408 while ((skb = skb_peek(&ssk->sk_receive_queue))) 1409 sk_eat_skb(ssk, skb); 1410 mptcp_send_active_reset_reason(ssk); 1411 WRITE_ONCE(subflow->data_avail, false); 1412 return false; 1413 } 1414 1415 mptcp_do_fallback(ssk); 1416 } 1417 1418 skb = skb_peek(&ssk->sk_receive_queue); 1419 subflow->map_valid = 1; 1420 subflow->map_seq = READ_ONCE(msk->ack_seq); 1421 subflow->map_data_len = skb->len; 1422 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; 1423 WRITE_ONCE(subflow->data_avail, true); 1424 return true; 1425 } 1426 1427 bool mptcp_subflow_data_available(struct sock *sk) 1428 { 1429 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1430 1431 /* check if current mapping is still valid */ 1432 if (subflow->map_valid && 1433 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { 1434 subflow->map_valid = 0; 1435 WRITE_ONCE(subflow->data_avail, false); 1436 1437 pr_debug("Done with mapping: seq=%u data_len=%u\n", 1438 subflow->map_subflow_seq, 1439 subflow->map_data_len); 1440 } 1441 1442 return subflow_check_data_avail(sk); 1443 } 1444 1445 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy, 1446 * not the ssk one. 1447 * 1448 * In mptcp, rwin is about the mptcp-level connection data. 1449 * 1450 * Data that is still on the ssk rx queue can thus be ignored, 1451 * as far as mptcp peer is concerned that data is still inflight. 1452 * DSS ACK is updated when skb is moved to the mptcp rx queue. 1453 */ 1454 void mptcp_space(const struct sock *ssk, int *space, int *full_space) 1455 { 1456 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1457 const struct sock *sk = subflow->conn; 1458 1459 *space = __mptcp_space(sk); 1460 *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); 1461 } 1462 1463 static void subflow_error_report(struct sock *ssk) 1464 { 1465 struct sock *sk = mptcp_subflow_ctx(ssk)->conn; 1466 1467 /* bail early if this is a no-op, so that we avoid introducing a 1468 * problematic lockdep dependency between TCP accept queue lock 1469 * and msk socket spinlock 1470 */ 1471 if (!sk->sk_socket) 1472 return; 1473 1474 mptcp_data_lock(sk); 1475 if (!sock_owned_by_user(sk)) 1476 __mptcp_error_report(sk); 1477 else 1478 __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags); 1479 mptcp_data_unlock(sk); 1480 } 1481 1482 static void subflow_data_ready(struct sock *sk) 1483 { 1484 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1485 u16 state = 1 << inet_sk_state_load(sk); 1486 struct sock *parent = subflow->conn; 1487 struct mptcp_sock *msk; 1488 1489 trace_sk_data_ready(sk); 1490 1491 msk = mptcp_sk(parent); 1492 if (state & TCPF_LISTEN) { 1493 /* MPJ subflow are removed from accept queue before reaching here, 1494 * avoid stray wakeups 1495 */ 1496 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) 1497 return; 1498 1499 parent->sk_data_ready(parent); 1500 return; 1501 } 1502 1503 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && 1504 !subflow->mp_join && !(state & TCPF_CLOSE)); 1505 1506 if (mptcp_subflow_data_available(sk)) { 1507 mptcp_data_ready(parent, sk); 1508 1509 /* subflow-level lowat test are not relevant. 1510 * respect the msk-level threshold eventually mandating an immediate ack 1511 */ 1512 if (mptcp_data_avail(msk) < parent->sk_rcvlowat && 1513 (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss) 1514 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; 1515 } else if (unlikely(sk->sk_err)) { 1516 subflow_error_report(sk); 1517 } 1518 } 1519 1520 static void subflow_write_space(struct sock *ssk) 1521 { 1522 struct sock *sk = mptcp_subflow_ctx(ssk)->conn; 1523 1524 mptcp_propagate_sndbuf(sk, ssk); 1525 mptcp_write_space(sk); 1526 } 1527 1528 static const struct inet_connection_sock_af_ops * 1529 subflow_default_af_ops(struct sock *sk) 1530 { 1531 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1532 if (sk->sk_family == AF_INET6) 1533 return &subflow_v6_specific; 1534 #endif 1535 return &subflow_specific; 1536 } 1537 1538 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1539 void mptcpv6_handle_mapped(struct sock *sk, bool mapped) 1540 { 1541 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1542 struct inet_connection_sock *icsk = inet_csk(sk); 1543 const struct inet_connection_sock_af_ops *target; 1544 1545 target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk); 1546 1547 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", 1548 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); 1549 1550 if (likely(icsk->icsk_af_ops == target)) 1551 return; 1552 1553 subflow->icsk_af_ops = icsk->icsk_af_ops; 1554 icsk->icsk_af_ops = target; 1555 } 1556 #endif 1557 1558 void mptcp_info2sockaddr(const struct mptcp_addr_info *info, 1559 struct sockaddr_storage *addr, 1560 unsigned short family) 1561 { 1562 memset(addr, 0, sizeof(*addr)); 1563 addr->ss_family = family; 1564 if (addr->ss_family == AF_INET) { 1565 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr; 1566 1567 if (info->family == AF_INET) 1568 in_addr->sin_addr = info->addr; 1569 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1570 else if (ipv6_addr_v4mapped(&info->addr6)) 1571 in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3]; 1572 #endif 1573 in_addr->sin_port = info->port; 1574 } 1575 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1576 else if (addr->ss_family == AF_INET6) { 1577 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr; 1578 1579 if (info->family == AF_INET) 1580 ipv6_addr_set_v4mapped(info->addr.s_addr, 1581 &in6_addr->sin6_addr); 1582 else 1583 in6_addr->sin6_addr = info->addr6; 1584 in6_addr->sin6_port = info->port; 1585 } 1586 #endif 1587 } 1588 1589 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local, 1590 const struct mptcp_addr_info *remote) 1591 { 1592 struct mptcp_sock *msk = mptcp_sk(sk); 1593 struct mptcp_subflow_context *subflow; 1594 int local_id = local->addr.id; 1595 struct sockaddr_storage addr; 1596 int remote_id = remote->id; 1597 int err = -ENOTCONN; 1598 struct socket *sf; 1599 struct sock *ssk; 1600 u32 remote_token; 1601 int addrlen; 1602 1603 /* The userspace PM sent the request too early? */ 1604 if (!mptcp_is_fully_established(sk)) 1605 goto err_out; 1606 1607 err = mptcp_subflow_create_socket(sk, local->addr.family, &sf); 1608 if (err) { 1609 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCREATSKERR); 1610 pr_debug("msk=%p local=%d remote=%d create sock error: %d\n", 1611 msk, local_id, remote_id, err); 1612 goto err_out; 1613 } 1614 1615 ssk = sf->sk; 1616 subflow = mptcp_subflow_ctx(ssk); 1617 do { 1618 get_random_bytes(&subflow->local_nonce, sizeof(u32)); 1619 } while (!subflow->local_nonce); 1620 1621 /* if 'IPADDRANY', the ID will be set later, after the routing */ 1622 if (local->addr.family == AF_INET) { 1623 if (!local->addr.addr.s_addr) 1624 local_id = -1; 1625 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1626 } else if (sk->sk_family == AF_INET6) { 1627 if (ipv6_addr_any(&local->addr.addr6)) 1628 local_id = -1; 1629 #endif 1630 } 1631 1632 if (local_id >= 0) 1633 subflow_set_local_id(subflow, local_id); 1634 1635 subflow->remote_key_valid = 1; 1636 subflow->remote_key = READ_ONCE(msk->remote_key); 1637 subflow->local_key = READ_ONCE(msk->local_key); 1638 subflow->token = msk->token; 1639 mptcp_info2sockaddr(&local->addr, &addr, ssk->sk_family); 1640 1641 addrlen = sizeof(struct sockaddr_in); 1642 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1643 if (addr.ss_family == AF_INET6) 1644 addrlen = sizeof(struct sockaddr_in6); 1645 #endif 1646 ssk->sk_bound_dev_if = local->ifindex; 1647 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); 1648 if (err) { 1649 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXBINDERR); 1650 pr_debug("msk=%p local=%d remote=%d bind error: %d\n", 1651 msk, local_id, remote_id, err); 1652 goto failed; 1653 } 1654 1655 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); 1656 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk, 1657 remote_token, local_id, remote_id); 1658 subflow->remote_token = remote_token; 1659 WRITE_ONCE(subflow->remote_id, remote_id); 1660 subflow->request_join = 1; 1661 subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1662 subflow->subflow_id = msk->subflow_id++; 1663 mptcp_info2sockaddr(remote, &addr, ssk->sk_family); 1664 1665 sock_hold(ssk); 1666 list_add_tail(&subflow->node, &msk->conn_list); 1667 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); 1668 if (err && err != -EINPROGRESS) { 1669 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCONNECTERR); 1670 pr_debug("msk=%p local=%d remote=%d connect error: %d\n", 1671 msk, local_id, remote_id, err); 1672 goto failed_unlink; 1673 } 1674 1675 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTX); 1676 1677 /* discard the subflow socket */ 1678 mptcp_sock_graft(ssk, sk->sk_socket); 1679 iput(SOCK_INODE(sf)); 1680 WRITE_ONCE(msk->allow_infinite_fallback, false); 1681 mptcp_stop_tout_timer(sk); 1682 return 0; 1683 1684 failed_unlink: 1685 list_del(&subflow->node); 1686 sock_put(mptcp_subflow_tcp_sock(subflow)); 1687 1688 failed: 1689 subflow->disposable = 1; 1690 sock_release(sf); 1691 1692 err_out: 1693 /* we account subflows before the creation, and this failures will not 1694 * be caught by sk_state_change() 1695 */ 1696 mptcp_pm_close_subflow(msk); 1697 return err; 1698 } 1699 1700 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child) 1701 { 1702 #ifdef CONFIG_SOCK_CGROUP_DATA 1703 struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data, 1704 *child_skcd = &child->sk_cgrp_data; 1705 1706 /* only the additional subflows created by kworkers have to be modified */ 1707 if (cgroup_id(sock_cgroup_ptr(parent_skcd)) != 1708 cgroup_id(sock_cgroup_ptr(child_skcd))) { 1709 #ifdef CONFIG_MEMCG 1710 struct mem_cgroup *memcg = parent->sk_memcg; 1711 1712 mem_cgroup_sk_free(child); 1713 if (memcg && css_tryget(&memcg->css)) 1714 child->sk_memcg = memcg; 1715 #endif /* CONFIG_MEMCG */ 1716 1717 cgroup_sk_free(child_skcd); 1718 *child_skcd = *parent_skcd; 1719 cgroup_sk_clone(child_skcd); 1720 } 1721 #endif /* CONFIG_SOCK_CGROUP_DATA */ 1722 } 1723 1724 static void mptcp_subflow_ops_override(struct sock *ssk) 1725 { 1726 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1727 if (ssk->sk_prot == &tcpv6_prot) 1728 ssk->sk_prot = &tcpv6_prot_override; 1729 else 1730 #endif 1731 ssk->sk_prot = &tcp_prot_override; 1732 } 1733 1734 static void mptcp_subflow_ops_undo_override(struct sock *ssk) 1735 { 1736 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1737 if (ssk->sk_prot == &tcpv6_prot_override) 1738 ssk->sk_prot = &tcpv6_prot; 1739 else 1740 #endif 1741 ssk->sk_prot = &tcp_prot; 1742 } 1743 1744 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, 1745 struct socket **new_sock) 1746 { 1747 struct mptcp_subflow_context *subflow; 1748 struct net *net = sock_net(sk); 1749 struct socket *sf; 1750 int err; 1751 1752 /* un-accepted server sockets can reach here - on bad configuration 1753 * bail early to avoid greater trouble later 1754 */ 1755 if (unlikely(!sk->sk_socket)) 1756 return -EINVAL; 1757 1758 err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf); 1759 if (err) 1760 return err; 1761 1762 lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING); 1763 1764 err = security_mptcp_add_subflow(sk, sf->sk); 1765 if (err) 1766 goto err_free; 1767 1768 /* the newly created socket has to be in the same cgroup as its parent */ 1769 mptcp_attach_cgroup(sk, sf->sk); 1770 1771 /* kernel sockets do not by default acquire net ref, but TCP timer 1772 * needs it. 1773 * Update ns_tracker to current stack trace and refcounted tracker. 1774 */ 1775 __netns_tracker_free(net, &sf->sk->ns_tracker, false); 1776 sf->sk->sk_net_refcnt = 1; 1777 get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); 1778 sock_inuse_add(net, 1); 1779 err = tcp_set_ulp(sf->sk, "mptcp"); 1780 if (err) 1781 goto err_free; 1782 1783 mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk); 1784 release_sock(sf->sk); 1785 1786 /* the newly created socket really belongs to the owning MPTCP 1787 * socket, even if for additional subflows the allocation is performed 1788 * by a kernel workqueue. Adjust inode references, so that the 1789 * procfs/diag interfaces really show this one belonging to the correct 1790 * user. 1791 */ 1792 SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino; 1793 SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid; 1794 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid; 1795 1796 subflow = mptcp_subflow_ctx(sf->sk); 1797 pr_debug("subflow=%p\n", subflow); 1798 1799 *new_sock = sf; 1800 sock_hold(sk); 1801 subflow->conn = sk; 1802 mptcp_subflow_ops_override(sf->sk); 1803 1804 return 0; 1805 1806 err_free: 1807 release_sock(sf->sk); 1808 sock_release(sf); 1809 return err; 1810 } 1811 1812 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk, 1813 gfp_t priority) 1814 { 1815 struct inet_connection_sock *icsk = inet_csk(sk); 1816 struct mptcp_subflow_context *ctx; 1817 1818 ctx = kzalloc(sizeof(*ctx), priority); 1819 if (!ctx) 1820 return NULL; 1821 1822 rcu_assign_pointer(icsk->icsk_ulp_data, ctx); 1823 INIT_LIST_HEAD(&ctx->node); 1824 INIT_LIST_HEAD(&ctx->delegated_node); 1825 1826 pr_debug("subflow=%p\n", ctx); 1827 1828 ctx->tcp_sock = sk; 1829 WRITE_ONCE(ctx->local_id, -1); 1830 1831 return ctx; 1832 } 1833 1834 static void __subflow_state_change(struct sock *sk) 1835 { 1836 struct socket_wq *wq; 1837 1838 rcu_read_lock(); 1839 wq = rcu_dereference(sk->sk_wq); 1840 if (skwq_has_sleeper(wq)) 1841 wake_up_interruptible_all(&wq->wait); 1842 rcu_read_unlock(); 1843 } 1844 1845 static bool subflow_is_done(const struct sock *sk) 1846 { 1847 return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE; 1848 } 1849 1850 static void subflow_state_change(struct sock *sk) 1851 { 1852 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1853 struct sock *parent = subflow->conn; 1854 struct mptcp_sock *msk; 1855 1856 __subflow_state_change(sk); 1857 1858 msk = mptcp_sk(parent); 1859 if (subflow_simultaneous_connect(sk)) { 1860 mptcp_do_fallback(sk); 1861 pr_fallback(msk); 1862 subflow->conn_finished = 1; 1863 mptcp_propagate_state(parent, sk, subflow, NULL); 1864 } 1865 1866 /* as recvmsg() does not acquire the subflow socket for ssk selection 1867 * a fin packet carrying a DSS can be unnoticed if we don't trigger 1868 * the data available machinery here. 1869 */ 1870 if (mptcp_subflow_data_available(sk)) 1871 mptcp_data_ready(parent, sk); 1872 else if (unlikely(sk->sk_err)) 1873 subflow_error_report(sk); 1874 1875 subflow_sched_work_if_closed(mptcp_sk(parent), sk); 1876 1877 /* when the fallback subflow closes the rx side, trigger a 'dummy' 1878 * ingress data fin, so that the msk state will follow along 1879 */ 1880 if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk && 1881 mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true)) 1882 mptcp_schedule_work(parent); 1883 } 1884 1885 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk) 1886 { 1887 struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue; 1888 struct request_sock *req, *head, *tail; 1889 struct mptcp_subflow_context *subflow; 1890 struct sock *sk, *ssk; 1891 1892 /* Due to lock dependencies no relevant lock can be acquired under rskq_lock. 1893 * Splice the req list, so that accept() can not reach the pending ssk after 1894 * the listener socket is released below. 1895 */ 1896 spin_lock_bh(&queue->rskq_lock); 1897 head = queue->rskq_accept_head; 1898 tail = queue->rskq_accept_tail; 1899 queue->rskq_accept_head = NULL; 1900 queue->rskq_accept_tail = NULL; 1901 spin_unlock_bh(&queue->rskq_lock); 1902 1903 if (!head) 1904 return; 1905 1906 /* can't acquire the msk socket lock under the subflow one, 1907 * or will cause ABBA deadlock 1908 */ 1909 release_sock(listener_ssk); 1910 1911 for (req = head; req; req = req->dl_next) { 1912 ssk = req->sk; 1913 if (!sk_is_mptcp(ssk)) 1914 continue; 1915 1916 subflow = mptcp_subflow_ctx(ssk); 1917 if (!subflow || !subflow->conn) 1918 continue; 1919 1920 sk = subflow->conn; 1921 sock_hold(sk); 1922 1923 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 1924 __mptcp_unaccepted_force_close(sk); 1925 release_sock(sk); 1926 1927 /* lockdep will report a false positive ABBA deadlock 1928 * between cancel_work_sync and the listener socket. 1929 * The involved locks belong to different sockets WRT 1930 * the existing AB chain. 1931 * Using a per socket key is problematic as key 1932 * deregistration requires process context and must be 1933 * performed at socket disposal time, in atomic 1934 * context. 1935 * Just tell lockdep to consider the listener socket 1936 * released here. 1937 */ 1938 mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_); 1939 mptcp_cancel_work(sk); 1940 mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_); 1941 1942 sock_put(sk); 1943 } 1944 1945 /* we are still under the listener msk socket lock */ 1946 lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING); 1947 1948 /* restore the listener queue, to let the TCP code clean it up */ 1949 spin_lock_bh(&queue->rskq_lock); 1950 WARN_ON_ONCE(queue->rskq_accept_head); 1951 queue->rskq_accept_head = head; 1952 queue->rskq_accept_tail = tail; 1953 spin_unlock_bh(&queue->rskq_lock); 1954 } 1955 1956 static int subflow_ulp_init(struct sock *sk) 1957 { 1958 struct inet_connection_sock *icsk = inet_csk(sk); 1959 struct mptcp_subflow_context *ctx; 1960 struct tcp_sock *tp = tcp_sk(sk); 1961 int err = 0; 1962 1963 /* disallow attaching ULP to a socket unless it has been 1964 * created with sock_create_kern() 1965 */ 1966 if (!sk->sk_kern_sock) { 1967 err = -EOPNOTSUPP; 1968 goto out; 1969 } 1970 1971 ctx = subflow_create_ctx(sk, GFP_KERNEL); 1972 if (!ctx) { 1973 err = -ENOMEM; 1974 goto out; 1975 } 1976 1977 pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family); 1978 1979 tp->is_mptcp = 1; 1980 ctx->icsk_af_ops = icsk->icsk_af_ops; 1981 icsk->icsk_af_ops = subflow_default_af_ops(sk); 1982 ctx->tcp_state_change = sk->sk_state_change; 1983 ctx->tcp_error_report = sk->sk_error_report; 1984 1985 WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable); 1986 WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space); 1987 1988 sk->sk_data_ready = subflow_data_ready; 1989 sk->sk_write_space = subflow_write_space; 1990 sk->sk_state_change = subflow_state_change; 1991 sk->sk_error_report = subflow_error_report; 1992 out: 1993 return err; 1994 } 1995 1996 static void subflow_ulp_release(struct sock *ssk) 1997 { 1998 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); 1999 bool release = true; 2000 struct sock *sk; 2001 2002 if (!ctx) 2003 return; 2004 2005 sk = ctx->conn; 2006 if (sk) { 2007 /* if the msk has been orphaned, keep the ctx 2008 * alive, will be freed by __mptcp_close_ssk(), 2009 * when the subflow is still unaccepted 2010 */ 2011 release = ctx->disposable || list_empty(&ctx->node); 2012 2013 /* inet_child_forget() does not call sk_state_change(), 2014 * explicitly trigger the socket close machinery 2015 */ 2016 if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, 2017 &mptcp_sk(sk)->flags)) 2018 mptcp_schedule_work(sk); 2019 sock_put(sk); 2020 } 2021 2022 mptcp_subflow_ops_undo_override(ssk); 2023 if (release) 2024 kfree_rcu(ctx, rcu); 2025 } 2026 2027 static void subflow_ulp_clone(const struct request_sock *req, 2028 struct sock *newsk, 2029 const gfp_t priority) 2030 { 2031 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 2032 struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk); 2033 struct mptcp_subflow_context *new_ctx; 2034 2035 if (!tcp_rsk(req)->is_mptcp || 2036 (!subflow_req->mp_capable && !subflow_req->mp_join)) { 2037 subflow_ulp_fallback(newsk, old_ctx); 2038 return; 2039 } 2040 2041 new_ctx = subflow_create_ctx(newsk, priority); 2042 if (!new_ctx) { 2043 subflow_ulp_fallback(newsk, old_ctx); 2044 return; 2045 } 2046 2047 new_ctx->conn_finished = 1; 2048 new_ctx->icsk_af_ops = old_ctx->icsk_af_ops; 2049 new_ctx->tcp_state_change = old_ctx->tcp_state_change; 2050 new_ctx->tcp_error_report = old_ctx->tcp_error_report; 2051 new_ctx->rel_write_seq = 1; 2052 2053 if (subflow_req->mp_capable) { 2054 /* see comments in subflow_syn_recv_sock(), MPTCP connection 2055 * is fully established only after we receive the remote key 2056 */ 2057 new_ctx->mp_capable = 1; 2058 new_ctx->local_key = subflow_req->local_key; 2059 new_ctx->token = subflow_req->token; 2060 new_ctx->ssn_offset = subflow_req->ssn_offset; 2061 new_ctx->idsn = subflow_req->idsn; 2062 2063 /* this is the first subflow, id is always 0 */ 2064 subflow_set_local_id(new_ctx, 0); 2065 } else if (subflow_req->mp_join) { 2066 new_ctx->ssn_offset = subflow_req->ssn_offset; 2067 new_ctx->mp_join = 1; 2068 WRITE_ONCE(new_ctx->fully_established, true); 2069 new_ctx->remote_key_valid = 1; 2070 new_ctx->backup = subflow_req->backup; 2071 new_ctx->request_bkup = subflow_req->request_bkup; 2072 WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id); 2073 new_ctx->token = subflow_req->token; 2074 new_ctx->thmac = subflow_req->thmac; 2075 2076 /* the subflow req id is valid, fetched via subflow_check_req() 2077 * and subflow_token_join_request() 2078 */ 2079 subflow_set_local_id(new_ctx, subflow_req->local_id); 2080 } 2081 } 2082 2083 static void tcp_release_cb_override(struct sock *ssk) 2084 { 2085 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 2086 long status; 2087 2088 /* process and clear all the pending actions, but leave the subflow into 2089 * the napi queue. To respect locking, only the same CPU that originated 2090 * the action can touch the list. mptcp_napi_poll will take care of it. 2091 */ 2092 status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0); 2093 if (status) 2094 mptcp_subflow_process_delegated(ssk, status); 2095 2096 tcp_release_cb(ssk); 2097 } 2098 2099 static int tcp_abort_override(struct sock *ssk, int err) 2100 { 2101 /* closing a listener subflow requires a great deal of care. 2102 * keep it simple and just prevent such operation 2103 */ 2104 if (inet_sk_state_load(ssk) == TCP_LISTEN) 2105 return -EINVAL; 2106 2107 return tcp_abort(ssk, err); 2108 } 2109 2110 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = { 2111 .name = "mptcp", 2112 .owner = THIS_MODULE, 2113 .init = subflow_ulp_init, 2114 .release = subflow_ulp_release, 2115 .clone = subflow_ulp_clone, 2116 }; 2117 2118 static int subflow_ops_init(struct request_sock_ops *subflow_ops) 2119 { 2120 subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock); 2121 2122 subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name, 2123 subflow_ops->obj_size, 0, 2124 SLAB_ACCOUNT | 2125 SLAB_TYPESAFE_BY_RCU, 2126 NULL); 2127 if (!subflow_ops->slab) 2128 return -ENOMEM; 2129 2130 return 0; 2131 } 2132 2133 void __init mptcp_subflow_init(void) 2134 { 2135 mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops; 2136 mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4"; 2137 mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor; 2138 2139 if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0) 2140 panic("MPTCP: failed to init subflow v4 request sock ops\n"); 2141 2142 subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops; 2143 subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req; 2144 subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack; 2145 2146 subflow_specific = ipv4_specific; 2147 subflow_specific.conn_request = subflow_v4_conn_request; 2148 subflow_specific.syn_recv_sock = subflow_syn_recv_sock; 2149 subflow_specific.sk_rx_dst_set = subflow_finish_connect; 2150 subflow_specific.rebuild_header = subflow_rebuild_header; 2151 2152 tcp_prot_override = tcp_prot; 2153 tcp_prot_override.release_cb = tcp_release_cb_override; 2154 tcp_prot_override.diag_destroy = tcp_abort_override; 2155 2156 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2157 /* In struct mptcp_subflow_request_sock, we assume the TCP request sock 2158 * structures for v4 and v6 have the same size. It should not changed in 2159 * the future but better to make sure to be warned if it is no longer 2160 * the case. 2161 */ 2162 BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock)); 2163 2164 mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops; 2165 mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6"; 2166 mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor; 2167 2168 if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0) 2169 panic("MPTCP: failed to init subflow v6 request sock ops\n"); 2170 2171 subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops; 2172 subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req; 2173 subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack; 2174 2175 subflow_v6_specific = ipv6_specific; 2176 subflow_v6_specific.conn_request = subflow_v6_conn_request; 2177 subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock; 2178 subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect; 2179 subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header; 2180 2181 subflow_v6m_specific = subflow_v6_specific; 2182 subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit; 2183 subflow_v6m_specific.send_check = ipv4_specific.send_check; 2184 subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len; 2185 subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced; 2186 subflow_v6m_specific.rebuild_header = subflow_rebuild_header; 2187 2188 tcpv6_prot_override = tcpv6_prot; 2189 tcpv6_prot_override.release_cb = tcp_release_cb_override; 2190 tcpv6_prot_override.diag_destroy = tcp_abort_override; 2191 #endif 2192 2193 mptcp_diag_subflow_init(&subflow_ulp_ops); 2194 2195 if (tcp_register_ulp(&subflow_ulp_ops) != 0) 2196 panic("MPTCP: failed to register subflows to ULP\n"); 2197 } 2198