// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static int subflow_rebuild_header(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	int local_id, err = 0;

	if (subflow->request_mptcp && !subflow->token) {
		pr_debug("subflow=%p", sk);
		err = mptcp_token_new_connect(sk);
	} else if (subflow->request_join && !subflow->local_nonce) {
		struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn;

		pr_debug("subflow=%p", sk);

		do {
			get_random_bytes(&subflow->local_nonce, sizeof(u32));
		} while (!subflow->local_nonce);

		if (subflow->local_id)
			goto out;

		local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
		if (local_id < 0)
			return -EINVAL;

		subflow->local_id = local_id;
	}

out:
	if (err)
		return err;

	return subflow->icsk_af_ops->rebuild_header(sk);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->mp_capable)
		mptcp_token_destroy_request(subflow_req->token);
	tcp_request_sock_ops.destructor(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static bool subflow_token_join_request(struct request_sock *req,
				       const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[MPTCPOPT_HMAC_LEN];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return false;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return false;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);

	sock_put((struct sock *)msk);
	return true;
}

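/* Shared SYN handling for both address families: parse the MPTCP
 * options on the incoming SYN and arm either mp_capable (new
 * connection) or mp_join (additional subflow) on the request socket.
 * Anything else leaves the request as plain TCP.
 */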
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	mptcp_get_options(skb, &mp_opt);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return;
#endif

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
			 subflow_req->remote_nonce);
		if (!subflow_token_join_request(req, skb)) {
			subflow_req->mp_join = 0;
			/* @@ need to trigger RST */
		}
	}
}

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

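/* The SYN-ACK does not carry the full digest: only the most-significant
 * 64 bits of the HMAC travel in the MP_JOIN option, hence both ends
 * compare get_unaligned_be64() of the locally computed digest.
 */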
/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[MPTCPOPT_HMAC_LEN];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct tcp_sock *tp = tcp_sk(sk);

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->conn_finished = 1;

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp && mp_opt.mp_capable) {
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
	} else if (subflow->request_join && mp_opt.mp_join) {
		subflow->mp_join = 1;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);
	} else if (subflow->request_mptcp) {
		tp->is_mptcp = 0;
	}

	if (!tp->is_mptcp)
		return;

	if (subflow->mp_capable) {
		pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
			 subflow->remote_key);
		mptcp_finish_connect(sk);

		if (skb) {
			pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
			subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
		}
	} else if (subflow->mp_join) {
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
			 subflow, subflow->thmac,
			 subflow->remote_nonce);
		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->mp_join = 0;
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      subflow->hmac);

		if (skb)
			subflow->ssn_offset = TCP_SKB_CB(skb)->seq;

		if (!mptcp_finish_join(sk))
			goto do_reset;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else {
do_reset:
		tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

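/* The third ACK carries the initiator's full-length HMAC. Note the
 * key/nonce order is mirrored relative to subflow_token_join_request():
 * the initiator signs with (its key, our key) over (its nonce, ours),
 * so the listener validates with (remote, local) ordering here.
 */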
/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[MPTCPOPT_HMAC_LEN];
	struct mptcp_sock *msk;
	bool ret;

	subflow_req = mptcp_subflow_rsk(req);
	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	ret = true;
	if (crypto_memneq(hmac, mp_opt->hmac, sizeof(hmac)))
		ret = false;

	sock_put((struct sock *)msk);
	return ret;
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk.  The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will not be in
	 * SYN_RECV state and doesn't have SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_SYN_RECV) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

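/* Child socket creation for MPTCP listeners. Three outcomes are
 * possible: fall back to plain TCP (clearing the subflow context), let
 * an MP_CAPABLE child take ownership of a freshly cloned mptcp socket,
 * or attach an MP_JOIN child to the mptcp socket owning its token.
 */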
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback_is_fatal = false;
	struct sock *new_msk = NULL;
	bool fallback = false;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* we need later a valid 'mp_capable' value even when options are not
	 * parsed
	 */
	mp_opt.mp_capable = 0;
	if (tcp_rsk(req)->is_mptcp == 0)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	subflow_req = mptcp_subflow_rsk(req);
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the
			 * MP_CAPABLE opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		fallback_is_fatal = true;
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			return NULL;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			if (ctx) {
				subflow_ulp_fallback(child, ctx);
				kfree_rcu(ctx, rcu);
			}
			goto out;
		}

		if (ctx->mp_capable) {
			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			ctx->remote_key = mp_opt.sndr_key;
			ctx->fully_established = mp_opt.mp_capable;
			ctx->can_ack = mp_opt.mp_capable;
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = mptcp_token_get_sock(ctx->token);
			if (!owner)
				goto dispose_child;

			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	tcp_rsk(req)->drop_req = true;
	tcp_send_active_reset(child, GFP_ATOMIC);
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN
};

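/* Expand a 32-bit data sequence number from the DSS option to 64 bits,
 * assuming the new mapping covers data not mapped yet. E.g. with
 * old_seq = 0x1fffffff0 and old_data_len = 0x100, a wire DSN of 0xf0
 * lies past the 4GB wrap: old_seq + old_data_len + 1 is 0x2000000f1,
 * so the expanded sequence becomes 0x2000000f0.
 */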
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

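/* Inspect the skb at the head of the subflow receive queue and match it
 * against the DSS state: returns MAPPING_EMPTY when there is nothing to
 * process, MAPPING_DATA_FIN for a bare DATA_FIN, MAPPING_INVALID on
 * protocol violations (the caller will reset the subflow) and
 * MAPPING_OK when the data is covered by a valid mapping.
 */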
static enum mapping_status get_mapping_status(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the
			 * receive queue, those are the only 0 len pkts
			 * ever expected here, and we can admit no mapping
			 * only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			pr_debug("DATA_FIN with no payload");
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		subflow->use_64bit_ack = 0;
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
		subflow->use_64bit_ack = 1;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate the existing mapping on each new skb, because we
	 * must ensure the current skb is completely covered by the
	 * available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

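/* tcp_read_sock() actor used below to drop data that was already acked
 * at the MPTCP level: it consumes up to desc->count bytes without
 * copying them anywhere.
 */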
static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmissions; we can hit "future" values on active backup
		 * subflow switch, and we rely on retransmissions to get
		 * in-sequence data.
		 * Concurrent subflows support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}

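/* Refresh subflow->data_avail: drop an exhausted mapping first, then
 * probe for newly mapped in-sequence data at the head of the receive
 * queue.
 */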
bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
		before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	if (!subflow->mp_capable && !subflow->mp_join) {
		subflow->tcp_data_ready(sk);

		parent->sk_data_ready(parent);
		return;
	}

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears
		 * NOSPACE
		 */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

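/* Kick off an additional subflow towards @remote: create a kernel TCP
 * socket bound to @loc, inherit keys and token from the parent msk,
 * then issue a non-blocking connect; the MP_JOIN handshake completes
 * asynchronously in subflow_finish_connect().
 */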
int __mptcp_subflow_connect(struct sock *sk, int ifindex,
			    const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	struct socket *sf;
	u32 remote_token;
	int addrlen;
	int err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	subflow = mptcp_subflow_ctx(sf->sk);
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	sf->sk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u", msk, remote_token);
	subflow->remote_token = remote_token;
	subflow->local_id = loc->id;
	subflow->request_join = 1;
	subflow->request_bkup = 1;
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err)
		return err;

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (subflow->mp_capable && mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

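/* ULP init is only reachable for kernel sockets created via
 * mptcp_subflow_create_socket(); userspace cannot attach the "mptcp"
 * ULP directly. On success the subflow context takes over the sock
 * callbacks, saving the original ones for the fallback paths.
 */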
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

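/* Build the request_sock_ops instance used for MPTCP subflows: same
 * shape as plain TCP request socks, but with a larger object size and
 * a destructor that also releases the MPTCP token.
 */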
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

void mptcp_subflow_init(void)
{
	subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}