// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"

static int subflow_rebuild_header(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	int err = 0;

	if (subflow->request_mptcp && !subflow->token) {
		pr_debug("subflow=%p", sk);
		err = mptcp_token_new_connect(sk);
	}

	if (err)
		return err;

	return subflow->icsk_af_ops->rebuild_header(sk);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->mp_capable)
		mptcp_token_destroy_request(subflow_req->token);
	tcp_request_sock_ops.destructor(req);
}
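/* Parse the MPTCP options carried by the incoming SYN and, when both the
 * listener and the peer requested MP_CAPABLE, allocate a token for the
 * request socket and record the initial subflow sequence offset. On any
 * failure the request simply stays a plain TCP one (mp_capable == 0).
 */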
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct tcp_options_received rx_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	memset(&rx_opt.mptcp, 0, sizeof(rx_opt.mptcp));
	mptcp_get_options(skb, &rx_opt);

	subflow_req->mp_capable = 0;
	subflow_req->remote_key_valid = 0;

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return;
#endif

	if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	}
}

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (!subflow->conn_finished) {
		pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
			 subflow->remote_key);
		mptcp_finish_connect(sk);
		subflow->conn_finished = 1;

		if (skb) {
			pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
			subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
		}
	}
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif
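/* Create the child socket for a passively opened subflow. For an
 * MP_CAPABLE request this re-parses the options to fetch the client key
 * and clones a new MPTCP master socket (msk) that the MPC child will own;
 * if anything fails the child is created as a plain TCP socket instead.
 */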
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct tcp_options_received opt_rx;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	if (tcp_rsk(req)->is_mptcp == 0)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	subflow_req = mptcp_subflow_rsk(req);
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		opt_rx.mptcp.mp_capable = 0;
		mptcp_get_options(skb, &opt_rx);
		if (opt_rx.mptcp.mp_capable) {
			subflow_req->remote_key = opt_rx.mptcp.sndr_key;
			subflow_req->remote_key_valid = 1;
		} else {
			subflow_req->mp_capable = 0;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, req);
		if (!new_msk)
			subflow_req->mp_capable = 0;
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		/* we have null ctx on TCP fallback, not fatal on MPC
		 * handshake
		 */
		if (!ctx)
			goto out;

		if (ctx->mp_capable) {
			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			ctx->conn = new_msk;
			new_msk = NULL;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		sock_put(new_msk);
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN
};

static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}
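/* Validate the DSS mapping for the skb at the head of the receive queue.
 * Returns MAPPING_OK when the current (or newly installed) mapping covers
 * the skb, MAPPING_EMPTY when there is nothing to process, MAPPING_DATA_FIN
 * for a bare DATA_FIN, and MAPPING_INVALID on protocol errors such as
 * infinite mappings, conflicting maps or out-of-range sequence numbers.
 */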
static enum mapping_status get_mapping_status(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN pkts to the receive
			 * queue, those are the only 0-len pkts ever expected
			 * here, and we can admit no mapping only for 0-len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			pr_debug("DATA_FIN with no payload");
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb))
			return MAPPING_INVALID;

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}
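/* Fetch and validate mappings until the skb at the head of the receive
 * queue carries data that is in sequence at the MPTCP level. Data already
 * acked at the data sequence level (e.g. spurious retransmissions) is
 * discarded via tcp_read_sock() without being exposed to the MPTCP socket.
 */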
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mappings. Old values are spurious
		 * retransmissions; we can hit "future" values on active backup
		 * subflow switch, and rely on retransmissions to get
		 * in-sequence data.
		 * Concurrent subflow support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}
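/* Report whether this subflow has data the MPTCP-level receive path can
 * consume right now. Also drops a mapping once it has been fully consumed,
 * so the next call will look for a fresh one.
 */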
bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
		before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	if (!subflow->mp_capable) {
		subflow->tcp_data_ready(sk);

		parent->sk_data_ready(parent);
		return;
	}

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}
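/* Switch a v6 subflow to the mapped-v4 ops when the peer address is a
 * v4-mapped one: the IPv4 transmit and MTU handlers are used while keeping
 * the IPv6 socket layout. The ops being replaced are saved in the subflow
 * context so the wrapped callbacks can still chain to them.
 */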
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err)
		return err;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	/* as recvmsg() does not acquire the subflow socket for ssk selection,
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (subflow->mp_capable && mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		parent->sk_shutdown |= RCV_SHUTDOWN;
		__subflow_state_change(parent);
	}
}
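/* ULP hooks: attach an MPTCP subflow context to a kernel-created TCP socket
 * and divert its data_ready/write_space/state_change callbacks so that
 * events are propagated to the owning MPTCP socket. Only sockets created
 * via sock_create_kern() (see mptcp_subflow_create_socket() above) are
 * accepted.
 */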
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp || !subflow_req->mp_capable) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	/* see comments in subflow_syn_recv_sock(), MPTCP connection is fully
	 * established only after we receive the remote key
	 */
	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	new_ctx->mp_capable = 1;
	new_ctx->fourth_ack = subflow_req->remote_key_valid;
	new_ctx->can_ack = subflow_req->remote_key_valid;
	new_ctx->remote_key = subflow_req->remote_key;
	new_ctx->local_key = subflow_req->local_key;
	new_ctx->token = subflow_req->token;
	new_ctx->ssn_offset = subflow_req->ssn_offset;
	new_ctx->idsn = subflow_req->idsn;
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};
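/* Build the request_sock_ops used for MPTCP subflows: same as TCP, but with
 * a larger object size to hold the MPTCP request fields, a dedicated slab
 * cache and a destructor that releases the token.
 */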
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

void mptcp_subflow_init(void)
{
	subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}