// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	mptcp_get_options(skb, &mp_opt);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return;
#endif

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);
		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp && mp_opt.mp_capable) {
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
	} else if (subflow->request_join && mp_opt.mp_join) {
		subflow->mp_join = 1;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);
	} else {
		if (subflow->request_mptcp)
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
		mptcp_do_fallback(sk);
		pr_fallback(mptcp_sk(subflow->conn));
	}

	if (mptcp_check_fallback(sk)) {
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		return;
	}

	if (subflow->mp_capable) {
		pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->mp_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
			 subflow, subflow->thmac,
			 subflow->remote_nonce);
		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->mp_join = 0;
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);

		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else {
do_reset:
		tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if the new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will not be in
	 * SYN_RECV state and doesn't have SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_SYN_RECV) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_token_destroy(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fall back on ctx allocation failure and on the
		 * pre-reqs checking above. In the latter scenario we
		 * additionally need to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			ctx->remote_key = mp_opt.sndr_key;
			ctx->fully_established = mp_opt.mp_capable;
			ctx->can_ack = mp_opt.mp_capable;
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the leftover mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	tcp_send_active_reset(child, GFP_ATOMIC);
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
			     subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

static enum mapping_status get_mapping_status(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue; those are the only 0 len pkts ever expected
			 * here, and the only ones we can accept without a
			 * mapping
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			pr_debug("DATA_FIN with no payload");
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		subflow->use_64bit_ack = 0;
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
		subflow->use_64bit_ack = 1;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb's data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			return true;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key,
				   subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmissions; we can hit "future" values on an active
		 * backup subflow switch, and we rely on retransmissions to get
		 * in-sequence data.
		 * Concurrent subflows support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
		before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if ((1 << inet_sk_state_load(sk)) & (TCPF_LISTEN | TCPF_CLOSE)) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join);

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ?
		 &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

int __mptcp_subflow_connect(struct sock *sk, int ifindex,
			    const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
		 local_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->request_join = 1;
	subflow->request_bkup = 1;
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire a net ref, but the TCP
	 * timer needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if, for additional subflows, the allocation is
	 * performed by a kernel workqueue. Adjust inode references, so that
	 * the procfs/diag interfaces really show this one belonging to the
	 * correct user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection,
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

void __init mptcp_subflow_init(void)
{
	subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}