// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/aligned_data.h>
#include <net/rps.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

enum {
	MPTCP_CMSG_TS = BIT(0),
	MPTCP_CMSG_INQ = BIT(1),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
static struct net_device *mptcp_napi_dev;

/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}

static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_prot == &tcpv6_prot)
		return &inet6_stream_ops;
#endif
	WARN_ON_ONCE(sk->sk_prot != &tcp_prot);
	return &inet_stream_ops;
}

bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
{
	struct net *net = sock_net((struct sock *)msk);

	if (__mptcp_check_fallback(msk))
		return true;

	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		spin_unlock_bh(&msk->fallback_lock);
		return false;
	}

	msk->allow_subflows = false;
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
	__MPTCP_INC_STATS(net, fb_mib);
	spin_unlock_bh(&msk->fallback_lock);
	return true;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
	if (err)
		return err;

	msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
	WRITE_ONCE(msk->first, ssock->sk);
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;
	subflow->subflow_id = msk->subflow_id++;

	/* This is the first subflow, always with id 0 */
	WRITE_ONCE(subflow->local_id, 0);
	mptcp_sock_graft(msk->first, sk->sk_socket);
	iput(SOCK_INODE(ssock));

	return 0;
}

/* If the MPC handshake is not started, returns the first subflow,
 * allocating it if needed.
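 * Returns an ERR_PTR() value on error.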
 */
struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	int ret;

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		return ERR_PTR(-EINVAL);

	if (!msk->first) {
		ret = __mptcp_socket_create(msk);
		if (ret)
			return ERR_PTR(ret);
	}

	return msk->first;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_skbadd(sk, skb);
	__kfree_skb(skb);
}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
	    MPTCP_SKB_CB(from)->offset ||
	    ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;

	/* note the fwd memory can reach a negative value after accounting
	 * for the delta, but the later skb free will restore a non
	 * negative one
	 */
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	kfree_skb_partial(from, fragstolen);

	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = atomic64_read(&msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely.
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
220 */ 221 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { 222 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); 223 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); 224 return; 225 } 226 227 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ 228 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { 229 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); 230 parent = &msk->ooo_last_skb->rbnode; 231 p = &parent->rb_right; 232 goto insert; 233 } 234 235 /* Find place to insert this segment. Handle overlaps on the way. */ 236 parent = NULL; 237 while (*p) { 238 parent = *p; 239 skb1 = rb_to_skb(parent); 240 if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { 241 p = &parent->rb_left; 242 continue; 243 } 244 if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { 245 if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) { 246 /* All the bits are present. Drop. */ 247 mptcp_drop(sk, skb); 248 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 249 return; 250 } 251 if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { 252 /* partial overlap: 253 * | skb | 254 * | skb1 | 255 * continue traversing 256 */ 257 } else { 258 /* skb's seq == skb1's seq and skb covers skb1. 259 * Replace skb1 with skb. 260 */ 261 rb_replace_node(&skb1->rbnode, &skb->rbnode, 262 &msk->out_of_order_queue); 263 mptcp_drop(sk, skb1); 264 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 265 goto merge_right; 266 } 267 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { 268 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); 269 return; 270 } 271 p = &parent->rb_right; 272 } 273 274 insert: 275 /* Insert segment into RB tree. */ 276 rb_link_node(&skb->rbnode, parent, p); 277 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); 278 279 merge_right: 280 /* Remove other segments covered by skb. */ 281 while ((skb1 = skb_rb_next(skb)) != NULL) { 282 if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) 283 break; 284 rb_erase(&skb1->rbnode, &msk->out_of_order_queue); 285 mptcp_drop(sk, skb1); 286 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 287 } 288 /* If there is no skb after us, we are the last_skb ! 
*/ 289 if (!skb1) 290 msk->ooo_last_skb = skb; 291 292 end: 293 skb_condense(skb); 294 skb_set_owner_r(skb, sk); 295 } 296 297 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, 298 struct sk_buff *skb, unsigned int offset, 299 size_t copy_len) 300 { 301 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 302 struct sock *sk = (struct sock *)msk; 303 struct sk_buff *tail; 304 bool has_rxtstamp; 305 306 __skb_unlink(skb, &ssk->sk_receive_queue); 307 308 skb_ext_reset(skb); 309 skb_orphan(skb); 310 311 /* try to fetch required memory from subflow */ 312 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { 313 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); 314 goto drop; 315 } 316 317 has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; 318 319 /* the skb map_seq accounts for the skb offset: 320 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq 321 * value 322 */ 323 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); 324 MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; 325 MPTCP_SKB_CB(skb)->offset = offset; 326 MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp; 327 MPTCP_SKB_CB(skb)->cant_coalesce = 0; 328 329 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { 330 /* in sequence */ 331 msk->bytes_received += copy_len; 332 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); 333 tail = skb_peek_tail(&sk->sk_receive_queue); 334 if (tail && mptcp_try_coalesce(sk, tail, skb)) 335 return true; 336 337 skb_set_owner_r(skb, sk); 338 __skb_queue_tail(&sk->sk_receive_queue, skb); 339 return true; 340 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { 341 mptcp_data_queue_ofo(msk, skb); 342 return false; 343 } 344 345 /* old data, keep it simple and drop the whole pkt, sender 346 * will retransmit as needed, if needed. 
347 */ 348 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 349 drop: 350 mptcp_drop(sk, skb); 351 return false; 352 } 353 354 static void mptcp_stop_rtx_timer(struct sock *sk) 355 { 356 struct inet_connection_sock *icsk = inet_csk(sk); 357 358 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); 359 mptcp_sk(sk)->timer_ival = 0; 360 } 361 362 static void mptcp_close_wake_up(struct sock *sk) 363 { 364 if (sock_flag(sk, SOCK_DEAD)) 365 return; 366 367 sk->sk_state_change(sk); 368 if (sk->sk_shutdown == SHUTDOWN_MASK || 369 sk->sk_state == TCP_CLOSE) 370 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 371 else 372 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 373 } 374 375 static void mptcp_shutdown_subflows(struct mptcp_sock *msk) 376 { 377 struct mptcp_subflow_context *subflow; 378 379 mptcp_for_each_subflow(msk, subflow) { 380 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 381 bool slow; 382 383 slow = lock_sock_fast(ssk); 384 tcp_shutdown(ssk, SEND_SHUTDOWN); 385 unlock_sock_fast(ssk, slow); 386 } 387 } 388 389 /* called under the msk socket lock */ 390 static bool mptcp_pending_data_fin_ack(struct sock *sk) 391 { 392 struct mptcp_sock *msk = mptcp_sk(sk); 393 394 return ((1 << sk->sk_state) & 395 (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && 396 msk->write_seq == READ_ONCE(msk->snd_una); 397 } 398 399 static void mptcp_check_data_fin_ack(struct sock *sk) 400 { 401 struct mptcp_sock *msk = mptcp_sk(sk); 402 403 /* Look for an acknowledged DATA_FIN */ 404 if (mptcp_pending_data_fin_ack(sk)) { 405 WRITE_ONCE(msk->snd_data_fin_enable, 0); 406 407 switch (sk->sk_state) { 408 case TCP_FIN_WAIT1: 409 mptcp_set_state(sk, TCP_FIN_WAIT2); 410 break; 411 case TCP_CLOSING: 412 case TCP_LAST_ACK: 413 mptcp_shutdown_subflows(msk); 414 mptcp_set_state(sk, TCP_CLOSE); 415 break; 416 } 417 418 mptcp_close_wake_up(sk); 419 } 420 } 421 422 /* can be called with no lock acquired */ 423 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) 424 { 425 struct mptcp_sock *msk = mptcp_sk(sk); 426 427 if (READ_ONCE(msk->rcv_data_fin) && 428 ((1 << inet_sk_state_load(sk)) & 429 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { 430 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); 431 432 if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) { 433 if (seq) 434 *seq = rcv_data_fin_seq; 435 436 return true; 437 } 438 } 439 440 return false; 441 } 442 443 static void mptcp_set_datafin_timeout(struct sock *sk) 444 { 445 struct inet_connection_sock *icsk = inet_csk(sk); 446 u32 retransmits; 447 448 retransmits = min_t(u32, icsk->icsk_retransmits, 449 ilog2(TCP_RTO_MAX / TCP_RTO_MIN)); 450 451 mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits; 452 } 453 454 static void __mptcp_set_timeout(struct sock *sk, long tout) 455 { 456 mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; 457 } 458 459 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) 460 { 461 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 462 463 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? 
464 icsk_timeout(inet_csk(ssk)) - jiffies : 0; 465 } 466 467 static void mptcp_set_timeout(struct sock *sk) 468 { 469 struct mptcp_subflow_context *subflow; 470 long tout = 0; 471 472 mptcp_for_each_subflow(mptcp_sk(sk), subflow) 473 tout = max(tout, mptcp_timeout_from_subflow(subflow)); 474 __mptcp_set_timeout(sk, tout); 475 } 476 477 static inline bool tcp_can_send_ack(const struct sock *ssk) 478 { 479 return !((1 << inet_sk_state_load(ssk)) & 480 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN)); 481 } 482 483 void __mptcp_subflow_send_ack(struct sock *ssk) 484 { 485 if (tcp_can_send_ack(ssk)) 486 tcp_send_ack(ssk); 487 } 488 489 static void mptcp_subflow_send_ack(struct sock *ssk) 490 { 491 bool slow; 492 493 slow = lock_sock_fast(ssk); 494 __mptcp_subflow_send_ack(ssk); 495 unlock_sock_fast(ssk, slow); 496 } 497 498 static void mptcp_send_ack(struct mptcp_sock *msk) 499 { 500 struct mptcp_subflow_context *subflow; 501 502 mptcp_for_each_subflow(msk, subflow) 503 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); 504 } 505 506 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied) 507 { 508 bool slow; 509 510 slow = lock_sock_fast(ssk); 511 if (tcp_can_send_ack(ssk)) 512 tcp_cleanup_rbuf(ssk, copied); 513 unlock_sock_fast(ssk, slow); 514 } 515 516 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) 517 { 518 const struct inet_connection_sock *icsk = inet_csk(ssk); 519 u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending); 520 const struct tcp_sock *tp = tcp_sk(ssk); 521 522 return (ack_pending & ICSK_ACK_SCHED) && 523 ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) > 524 READ_ONCE(icsk->icsk_ack.rcv_mss)) || 525 (rx_empty && ack_pending & 526 (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED))); 527 } 528 529 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) 530 { 531 int old_space = READ_ONCE(msk->old_wspace); 532 struct mptcp_subflow_context *subflow; 533 struct sock *sk = (struct sock *)msk; 534 int space = __mptcp_space(sk); 535 bool cleanup, rx_empty; 536 537 cleanup = (space > 0) && (space >= (old_space << 1)) && copied; 538 rx_empty = !sk_rmem_alloc_get(sk) && copied; 539 540 mptcp_for_each_subflow(msk, subflow) { 541 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 542 543 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) 544 mptcp_subflow_cleanup_rbuf(ssk, copied); 545 } 546 } 547 548 static bool mptcp_check_data_fin(struct sock *sk) 549 { 550 struct mptcp_sock *msk = mptcp_sk(sk); 551 u64 rcv_data_fin_seq; 552 bool ret = false; 553 554 /* Need to ack a DATA_FIN received from a peer while this side 555 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. 556 * msk->rcv_data_fin was set when parsing the incoming options 557 * at the subflow level and the msk lock was not held, so this 558 * is the first opportunity to act on the DATA_FIN and change 559 * the msk state. 560 * 561 * If we are caught up to the sequence number of the incoming 562 * DATA_FIN, send the DATA_ACK now and do state transition. If 563 * not caught up, do nothing and let the recv code send DATA_ACK 564 * when catching up. 
565 */ 566 567 if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { 568 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); 569 WRITE_ONCE(msk->rcv_data_fin, 0); 570 571 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); 572 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 573 574 switch (sk->sk_state) { 575 case TCP_ESTABLISHED: 576 mptcp_set_state(sk, TCP_CLOSE_WAIT); 577 break; 578 case TCP_FIN_WAIT1: 579 mptcp_set_state(sk, TCP_CLOSING); 580 break; 581 case TCP_FIN_WAIT2: 582 mptcp_shutdown_subflows(msk); 583 mptcp_set_state(sk, TCP_CLOSE); 584 break; 585 default: 586 /* Other states not expected */ 587 WARN_ON_ONCE(1); 588 break; 589 } 590 591 ret = true; 592 if (!__mptcp_check_fallback(msk)) 593 mptcp_send_ack(msk); 594 mptcp_close_wake_up(sk); 595 } 596 return ret; 597 } 598 599 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) 600 { 601 if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) { 602 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET); 603 mptcp_subflow_reset(ssk); 604 } 605 } 606 607 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, 608 struct sock *ssk) 609 { 610 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 611 struct sock *sk = (struct sock *)msk; 612 bool more_data_avail; 613 struct tcp_sock *tp; 614 bool ret = false; 615 616 pr_debug("msk=%p ssk=%p\n", msk, ssk); 617 tp = tcp_sk(ssk); 618 do { 619 u32 map_remaining, offset; 620 u32 seq = tp->copied_seq; 621 struct sk_buff *skb; 622 bool fin; 623 624 if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) 625 break; 626 627 /* try to move as much data as available */ 628 map_remaining = subflow->map_data_len - 629 mptcp_subflow_get_map_offset(subflow); 630 631 skb = skb_peek(&ssk->sk_receive_queue); 632 if (unlikely(!skb)) 633 break; 634 635 if (__mptcp_check_fallback(msk)) { 636 /* Under fallback skbs have no MPTCP extension and TCP could 637 * collapse them between the dummy map creation and the 638 * current dequeue. Be sure to adjust the map size. 
639 */ 640 map_remaining = skb->len; 641 subflow->map_data_len = skb->len; 642 } 643 644 offset = seq - TCP_SKB_CB(skb)->seq; 645 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 646 if (fin) 647 seq++; 648 649 if (offset < skb->len) { 650 size_t len = skb->len - offset; 651 652 ret = __mptcp_move_skb(msk, ssk, skb, offset, len) || ret; 653 seq += len; 654 655 if (unlikely(map_remaining < len)) { 656 DEBUG_NET_WARN_ON_ONCE(1); 657 mptcp_dss_corruption(msk, ssk); 658 } 659 } else { 660 if (unlikely(!fin)) { 661 DEBUG_NET_WARN_ON_ONCE(1); 662 mptcp_dss_corruption(msk, ssk); 663 } 664 665 sk_eat_skb(ssk, skb); 666 } 667 668 WRITE_ONCE(tp->copied_seq, seq); 669 more_data_avail = mptcp_subflow_data_available(ssk); 670 671 } while (more_data_avail); 672 673 if (ret) 674 msk->last_data_recv = tcp_jiffies32; 675 return ret; 676 } 677 678 static bool __mptcp_ofo_queue(struct mptcp_sock *msk) 679 { 680 struct sock *sk = (struct sock *)msk; 681 struct sk_buff *skb, *tail; 682 bool moved = false; 683 struct rb_node *p; 684 u64 end_seq; 685 686 p = rb_first(&msk->out_of_order_queue); 687 pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); 688 while (p) { 689 skb = rb_to_skb(p); 690 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) 691 break; 692 693 p = rb_next(p); 694 rb_erase(&skb->rbnode, &msk->out_of_order_queue); 695 696 if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq, 697 msk->ack_seq))) { 698 mptcp_drop(sk, skb); 699 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 700 continue; 701 } 702 703 end_seq = MPTCP_SKB_CB(skb)->end_seq; 704 tail = skb_peek_tail(&sk->sk_receive_queue); 705 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { 706 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; 707 708 /* skip overlapping data, if any */ 709 pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n", 710 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, 711 delta); 712 MPTCP_SKB_CB(skb)->offset += delta; 713 MPTCP_SKB_CB(skb)->map_seq += delta; 714 __skb_queue_tail(&sk->sk_receive_queue, skb); 715 } 716 msk->bytes_received += end_seq - msk->ack_seq; 717 WRITE_ONCE(msk->ack_seq, end_seq); 718 moved = true; 719 } 720 return moved; 721 } 722 723 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) 724 { 725 int err = sock_error(ssk); 726 int ssk_state; 727 728 if (!err) 729 return false; 730 731 /* only propagate errors on fallen-back sockets or 732 * on MPC connect 733 */ 734 if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) 735 return false; 736 737 /* We need to propagate only transition to CLOSE state. 738 * Orphaned socket will see such state change via 739 * subflow_sched_work_if_closed() and that path will properly 740 * destroy the msk as needed. 741 */ 742 ssk_state = inet_sk_state_load(ssk); 743 if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) 744 mptcp_set_state(sk, ssk_state); 745 WRITE_ONCE(sk->sk_err, -err); 746 747 /* This barrier is coupled with smp_rmb() in mptcp_poll() */ 748 smp_wmb(); 749 sk_error_report(sk); 750 return true; 751 } 752 753 void __mptcp_error_report(struct sock *sk) 754 { 755 struct mptcp_subflow_context *subflow; 756 struct mptcp_sock *msk = mptcp_sk(sk); 757 758 mptcp_for_each_subflow(msk, subflow) 759 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) 760 break; 761 } 762 763 /* In most cases we will be able to lock the mptcp socket. If its already 764 * owned, we need to defer to the work queue to avoid ABBA deadlock. 
765 */ 766 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) 767 { 768 struct sock *sk = (struct sock *)msk; 769 bool moved; 770 771 moved = __mptcp_move_skbs_from_subflow(msk, ssk); 772 __mptcp_ofo_queue(msk); 773 if (unlikely(ssk->sk_err)) { 774 if (!sock_owned_by_user(sk)) 775 __mptcp_error_report(sk); 776 else 777 __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags); 778 } 779 780 /* If the moves have caught up with the DATA_FIN sequence number 781 * it's time to ack the DATA_FIN and change socket state, but 782 * this is not a good place to change state. Let the workqueue 783 * do it. 784 */ 785 if (mptcp_pending_data_fin(sk, NULL)) 786 mptcp_schedule_work(sk); 787 return moved; 788 } 789 790 static void __mptcp_rcvbuf_update(struct sock *sk, struct sock *ssk) 791 { 792 if (unlikely(ssk->sk_rcvbuf > sk->sk_rcvbuf)) 793 WRITE_ONCE(sk->sk_rcvbuf, ssk->sk_rcvbuf); 794 } 795 796 static void __mptcp_data_ready(struct sock *sk, struct sock *ssk) 797 { 798 struct mptcp_sock *msk = mptcp_sk(sk); 799 800 __mptcp_rcvbuf_update(sk, ssk); 801 802 /* Wake-up the reader only for in-sequence data */ 803 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) 804 sk->sk_data_ready(sk); 805 } 806 807 void mptcp_data_ready(struct sock *sk, struct sock *ssk) 808 { 809 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 810 811 /* The peer can send data while we are shutting down this 812 * subflow at msk destruction time, but we must avoid enqueuing 813 * more data to the msk receive queue 814 */ 815 if (unlikely(subflow->disposable)) 816 return; 817 818 mptcp_data_lock(sk); 819 if (!sock_owned_by_user(sk)) 820 __mptcp_data_ready(sk, ssk); 821 else 822 __set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags); 823 mptcp_data_unlock(sk); 824 } 825 826 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) 827 { 828 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); 829 msk->allow_infinite_fallback = false; 830 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); 831 } 832 833 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) 834 { 835 struct sock *sk = (struct sock *)msk; 836 837 if (sk->sk_state != TCP_ESTABLISHED) 838 return false; 839 840 spin_lock_bh(&msk->fallback_lock); 841 if (!msk->allow_subflows) { 842 spin_unlock_bh(&msk->fallback_lock); 843 return false; 844 } 845 mptcp_subflow_joined(msk, ssk); 846 spin_unlock_bh(&msk->fallback_lock); 847 848 /* attach to msk socket only after we are sure we will deal with it 849 * at close time 850 */ 851 if (sk->sk_socket && !ssk->sk_socket) 852 mptcp_sock_graft(ssk, sk->sk_socket); 853 854 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; 855 mptcp_sockopt_sync_locked(msk, ssk); 856 mptcp_stop_tout_timer(sk); 857 __mptcp_propagate_sndbuf(sk, ssk); 858 return true; 859 } 860 861 static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list) 862 { 863 struct mptcp_subflow_context *tmp, *subflow; 864 struct mptcp_sock *msk = mptcp_sk(sk); 865 866 list_for_each_entry_safe(subflow, tmp, join_list, node) { 867 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 868 bool slow = lock_sock_fast(ssk); 869 870 list_move_tail(&subflow->node, &msk->conn_list); 871 if (!__mptcp_finish_join(msk, ssk)) 872 mptcp_subflow_reset(ssk); 873 unlock_sock_fast(ssk, slow); 874 } 875 } 876 877 static bool mptcp_rtx_timer_pending(struct sock *sk) 878 { 879 return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); 880 } 881 882 static void mptcp_reset_rtx_timer(struct sock 
*sk) 883 { 884 struct inet_connection_sock *icsk = inet_csk(sk); 885 unsigned long tout; 886 887 /* prevent rescheduling on close */ 888 if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE)) 889 return; 890 891 tout = mptcp_sk(sk)->timer_ival; 892 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout); 893 } 894 895 bool mptcp_schedule_work(struct sock *sk) 896 { 897 if (inet_sk_state_load(sk) != TCP_CLOSE && 898 schedule_work(&mptcp_sk(sk)->work)) { 899 /* each subflow already holds a reference to the sk, and the 900 * workqueue is invoked by a subflow, so sk can't go away here. 901 */ 902 sock_hold(sk); 903 return true; 904 } 905 return false; 906 } 907 908 static bool mptcp_skb_can_collapse_to(u64 write_seq, 909 const struct sk_buff *skb, 910 const struct mptcp_ext *mpext) 911 { 912 if (!tcp_skb_can_collapse_to(skb)) 913 return false; 914 915 /* can collapse only if MPTCP level sequence is in order and this 916 * mapping has not been xmitted yet 917 */ 918 return mpext && mpext->data_seq + mpext->data_len == write_seq && 919 !mpext->frozen; 920 } 921 922 /* we can append data to the given data frag if: 923 * - there is space available in the backing page_frag 924 * - the data frag tail matches the current page_frag free offset 925 * - the data frag end sequence number matches the current write seq 926 */ 927 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, 928 const struct page_frag *pfrag, 929 const struct mptcp_data_frag *df) 930 { 931 return df && pfrag->page == df->page && 932 pfrag->size - pfrag->offset > 0 && 933 pfrag->offset == (df->offset + df->data_len) && 934 df->data_seq + df->data_len == msk->write_seq; 935 } 936 937 static void dfrag_uncharge(struct sock *sk, int len) 938 { 939 sk_mem_uncharge(sk, len); 940 sk_wmem_queued_add(sk, -len); 941 } 942 943 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) 944 { 945 int len = dfrag->data_len + dfrag->overhead; 946 947 list_del(&dfrag->list); 948 dfrag_uncharge(sk, len); 949 put_page(dfrag->page); 950 } 951 952 /* called under both the msk socket lock and the data lock */ 953 static void __mptcp_clean_una(struct sock *sk) 954 { 955 struct mptcp_sock *msk = mptcp_sk(sk); 956 struct mptcp_data_frag *dtmp, *dfrag; 957 u64 snd_una; 958 959 snd_una = msk->snd_una; 960 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { 961 if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) 962 break; 963 964 if (unlikely(dfrag == msk->first_pending)) { 965 /* in recovery mode can see ack after the current snd head */ 966 if (WARN_ON_ONCE(!msk->recovery)) 967 break; 968 969 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 970 } 971 972 dfrag_clear(sk, dfrag); 973 } 974 975 dfrag = mptcp_rtx_head(sk); 976 if (dfrag && after64(snd_una, dfrag->data_seq)) { 977 u64 delta = snd_una - dfrag->data_seq; 978 979 /* prevent wrap around in recovery mode */ 980 if (unlikely(delta > dfrag->already_sent)) { 981 if (WARN_ON_ONCE(!msk->recovery)) 982 goto out; 983 if (WARN_ON_ONCE(delta > dfrag->data_len)) 984 goto out; 985 dfrag->already_sent += delta - dfrag->already_sent; 986 } 987 988 dfrag->data_seq += delta; 989 dfrag->offset += delta; 990 dfrag->data_len -= delta; 991 dfrag->already_sent -= delta; 992 993 dfrag_uncharge(sk, delta); 994 } 995 996 /* all retransmitted data acked, recovery completed */ 997 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) 998 msk->recovery = false; 999 1000 out: 1001 if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) { 1002 if 
(mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) 1003 mptcp_stop_rtx_timer(sk); 1004 } else { 1005 mptcp_reset_rtx_timer(sk); 1006 } 1007 1008 if (mptcp_pending_data_fin_ack(sk)) 1009 mptcp_schedule_work(sk); 1010 } 1011 1012 static void __mptcp_clean_una_wakeup(struct sock *sk) 1013 { 1014 lockdep_assert_held_once(&sk->sk_lock.slock); 1015 1016 __mptcp_clean_una(sk); 1017 mptcp_write_space(sk); 1018 } 1019 1020 static void mptcp_clean_una_wakeup(struct sock *sk) 1021 { 1022 mptcp_data_lock(sk); 1023 __mptcp_clean_una_wakeup(sk); 1024 mptcp_data_unlock(sk); 1025 } 1026 1027 static void mptcp_enter_memory_pressure(struct sock *sk) 1028 { 1029 struct mptcp_subflow_context *subflow; 1030 struct mptcp_sock *msk = mptcp_sk(sk); 1031 bool first = true; 1032 1033 mptcp_for_each_subflow(msk, subflow) { 1034 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 1035 1036 if (first) 1037 tcp_enter_memory_pressure(ssk); 1038 sk_stream_moderate_sndbuf(ssk); 1039 1040 first = false; 1041 } 1042 __mptcp_sync_sndbuf(sk); 1043 } 1044 1045 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of 1046 * data 1047 */ 1048 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 1049 { 1050 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag), 1051 pfrag, sk->sk_allocation))) 1052 return true; 1053 1054 mptcp_enter_memory_pressure(sk); 1055 return false; 1056 } 1057 1058 static struct mptcp_data_frag * 1059 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, 1060 int orig_offset) 1061 { 1062 int offset = ALIGN(orig_offset, sizeof(long)); 1063 struct mptcp_data_frag *dfrag; 1064 1065 dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset); 1066 dfrag->data_len = 0; 1067 dfrag->data_seq = msk->write_seq; 1068 dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag); 1069 dfrag->offset = offset + sizeof(struct mptcp_data_frag); 1070 dfrag->already_sent = 0; 1071 dfrag->page = pfrag->page; 1072 1073 return dfrag; 1074 } 1075 1076 struct mptcp_sendmsg_info { 1077 int mss_now; 1078 int size_goal; 1079 u16 limit; 1080 u16 sent; 1081 unsigned int flags; 1082 bool data_lock_held; 1083 }; 1084 1085 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, 1086 u64 data_seq, int avail_size) 1087 { 1088 u64 window_end = mptcp_wnd_end(msk); 1089 u64 mptcp_snd_wnd; 1090 1091 if (__mptcp_check_fallback(msk)) 1092 return avail_size; 1093 1094 mptcp_snd_wnd = window_end - data_seq; 1095 avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size); 1096 1097 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { 1098 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); 1099 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED); 1100 } 1101 1102 return avail_size; 1103 } 1104 1105 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp) 1106 { 1107 struct skb_ext *mpext = __skb_ext_alloc(gfp); 1108 1109 if (!mpext) 1110 return false; 1111 __skb_ext_set(skb, SKB_EXT_MPTCP, mpext); 1112 return true; 1113 } 1114 1115 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) 1116 { 1117 struct sk_buff *skb; 1118 1119 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); 1120 if (likely(skb)) { 1121 if (likely(__mptcp_add_ext(skb, gfp))) { 1122 skb_reserve(skb, MAX_TCP_HEADER); 1123 skb->ip_summed = CHECKSUM_PARTIAL; 1124 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 1125 return skb; 1126 } 1127 __kfree_skb(skb); 1128 } else { 1129 mptcp_enter_memory_pressure(sk); 1130 } 
	return NULL;
}

static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
	if (!skb)
		return NULL;

	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
		tcp_skb_entail(ssk, skb);
		return skb;
	}
	tcp_skb_tsorted_anchor_cleanup(skb);
	kfree_skb(skb);
	return NULL;
}

static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{
	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;

	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
}

/* note: this always recomputes the csum on the whole skb, even
 * if we just appended a single frag. More status info needed
 */
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{
	struct mptcp_ext *mpext = mptcp_get_ext(skb);
	__wsum csum = ~csum_unfold(mpext->csum);
	int offset = skb->len - added;

	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}

static void mptcp_update_infinite_map(struct mptcp_sock *msk,
				      struct sock *ssk,
				      struct mptcp_ext *mpext)
{
	if (!mpext)
		return;

	mpext->infinite_map = 1;
	mpext->data_len = 0;

	if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
		mptcp_subflow_reset(ssk);
		return;
	}

	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
}

#define MPTCP_MAX_GSO_SIZE	(GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct mptcp_data_frag *dfrag,
			      struct mptcp_sendmsg_info *info)
{
	u64 data_seq = dfrag->data_seq + info->sent;
	int offset = dfrag->offset + info->sent;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool zero_window_probe = false;
	struct mptcp_ext *mpext = NULL;
	bool can_coalesce = false;
	bool reuse_skb = true;
	struct sk_buff *skb;
	size_t copy;
	int i;

	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);

	if (WARN_ON_ONCE(info->sent > info->limit ||
			 info->limit > dfrag->data_len))
		return 0;

	if (unlikely(!__tcp_can_send(ssk)))
		return -EAGAIN;

	/* compute send limit */
	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
	copy = info->size_goal;

	skb = tcp_write_queue_tail(ssk);
	if (skb && copy > skb->len) {
		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
1226 * Explicitly tells TCP internals to avoid collapsing on later 1227 * queue management operation, to avoid breaking the ext <-> 1228 * SSN association set here 1229 */ 1230 mpext = mptcp_get_ext(skb); 1231 if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) { 1232 TCP_SKB_CB(skb)->eor = 1; 1233 tcp_mark_push(tcp_sk(ssk), skb); 1234 goto alloc_skb; 1235 } 1236 1237 i = skb_shinfo(skb)->nr_frags; 1238 can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); 1239 if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { 1240 tcp_mark_push(tcp_sk(ssk), skb); 1241 goto alloc_skb; 1242 } 1243 1244 copy -= skb->len; 1245 } else { 1246 alloc_skb: 1247 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); 1248 if (!skb) 1249 return -ENOMEM; 1250 1251 i = skb_shinfo(skb)->nr_frags; 1252 reuse_skb = false; 1253 mpext = mptcp_get_ext(skb); 1254 } 1255 1256 /* Zero window and all data acked? Probe. */ 1257 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); 1258 if (copy == 0) { 1259 u64 snd_una = READ_ONCE(msk->snd_una); 1260 1261 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { 1262 tcp_remove_empty_skb(ssk); 1263 return 0; 1264 } 1265 1266 zero_window_probe = true; 1267 data_seq = snd_una - 1; 1268 copy = 1; 1269 } 1270 1271 copy = min_t(size_t, copy, info->limit - info->sent); 1272 if (!sk_wmem_schedule(ssk, copy)) { 1273 tcp_remove_empty_skb(ssk); 1274 return -ENOMEM; 1275 } 1276 1277 if (can_coalesce) { 1278 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1279 } else { 1280 get_page(dfrag->page); 1281 skb_fill_page_desc(skb, i, dfrag->page, offset, copy); 1282 } 1283 1284 skb->len += copy; 1285 skb->data_len += copy; 1286 skb->truesize += copy; 1287 sk_wmem_queued_add(ssk, copy); 1288 sk_mem_charge(ssk, copy); 1289 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); 1290 TCP_SKB_CB(skb)->end_seq += copy; 1291 tcp_skb_pcount_set(skb, 0); 1292 1293 /* on skb reuse we just need to update the DSS len */ 1294 if (reuse_skb) { 1295 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1296 mpext->data_len += copy; 1297 goto out; 1298 } 1299 1300 memset(mpext, 0, sizeof(*mpext)); 1301 mpext->data_seq = data_seq; 1302 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; 1303 mpext->data_len = copy; 1304 mpext->use_map = 1; 1305 mpext->dsn64 = 1; 1306 1307 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n", 1308 mpext->data_seq, mpext->subflow_seq, mpext->data_len, 1309 mpext->dsn64); 1310 1311 if (zero_window_probe) { 1312 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1313 mpext->frozen = 1; 1314 if (READ_ONCE(msk->csum_enabled)) 1315 mptcp_update_data_checksum(skb, copy); 1316 tcp_push_pending_frames(ssk); 1317 return 0; 1318 } 1319 out: 1320 if (READ_ONCE(msk->csum_enabled)) 1321 mptcp_update_data_checksum(skb, copy); 1322 if (mptcp_subflow_ctx(ssk)->send_infinite_map) 1323 mptcp_update_infinite_map(msk, ssk, mpext); 1324 trace_mptcp_sendmsg_frag(mpext); 1325 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1326 return copy; 1327 } 1328 1329 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ 1330 sizeof(struct tcphdr) - \ 1331 MAX_TCP_OPTION_SPACE - \ 1332 sizeof(struct ipv6hdr) - \ 1333 sizeof(struct frag_hdr)) 1334 1335 struct subflow_send_info { 1336 struct sock *ssk; 1337 u64 linger_time; 1338 }; 1339 1340 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) 1341 { 1342 if (!subflow->stale) 1343 return; 1344 1345 subflow->stale = 0; 1346 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), 
			MPTCP_MIB_SUBFLOWRECOVER);
}

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	if (unlikely(subflow->stale)) {
		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);

		if (subflow->stale_rcv_tstamp == rcv_tstamp)
			return false;

		mptcp_subflow_set_active(subflow);
	}
	return __mptcp_subflow_active(subflow);
}

#define SSK_MODE_ACTIVE	0
#define SSK_MODE_BACKUP	1
#define SSK_MODE_MAX	2

/* implement the mptcp packet scheduler;
 * returns the subflow that will transmit the next DSS
 * additionally updates the rtx timeout
 */
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct subflow_send_info send_info[SSK_MODE_MAX];
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 pace, burst, wmem;
	int i, nr_active = 0;
	struct sock *ssk;
	u64 linger_time;
	long tout = 0;

	/* pick the subflow with the lower wmem/wspace ratio */
	for (i = 0; i < SSK_MODE_MAX; ++i) {
		send_info[i].ssk = NULL;
		send_info[i].linger_time = -1;
	}

	mptcp_for_each_subflow(msk, subflow) {
		bool backup = subflow->backup || subflow->request_bkup;

		trace_mptcp_subflow_get_send(subflow);
		ssk = mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

		tout = max(tout, mptcp_timeout_from_subflow(subflow));
		nr_active += !backup;
		pace = subflow->avg_pacing_rate;
		if (unlikely(!pace)) {
			/* init pacing rate from socket */
			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
			pace = subflow->avg_pacing_rate;
			if (!pace)
				continue;
		}

		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
		if (linger_time < send_info[backup].linger_time) {
			send_info[backup].ssk = ssk;
			send_info[backup].linger_time = linger_time;
		}
	}
	__mptcp_set_timeout(sk, tout);

	/* pick the best backup if no other subflow is active */
	if (!nr_active)
		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;

	/* According to the blest algorithm, to avoid HoL blocking for the
	 * faster flow, we need to:
	 * - estimate the faster flow linger time
	 * - use the above to estimate the amount of bytes transferred
	 *   by the faster flow
	 * - check that the amount of queued data is greater than the above,
	 *   otherwise do not use the picked, slower, subflow
	 * We select the subflow with the shorter estimated time to flush
	 * the queued mem, which basically ensures the above. We just need
	 * to check that subflow has a non-empty cwin.
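	 * (The linger time of each subflow is estimated above as
	 *  sk_wmem_queued / pacing rate, scaled by 2^32 to keep precision.)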
1428 */ 1429 ssk = send_info[SSK_MODE_ACTIVE].ssk; 1430 if (!ssk || !sk_stream_memory_free(ssk)) 1431 return NULL; 1432 1433 burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); 1434 wmem = READ_ONCE(ssk->sk_wmem_queued); 1435 if (!burst) 1436 return ssk; 1437 1438 subflow = mptcp_subflow_ctx(ssk); 1439 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + 1440 READ_ONCE(ssk->sk_pacing_rate) * burst, 1441 burst + wmem); 1442 msk->snd_burst = burst; 1443 return ssk; 1444 } 1445 1446 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) 1447 { 1448 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); 1449 release_sock(ssk); 1450 } 1451 1452 static void mptcp_update_post_push(struct mptcp_sock *msk, 1453 struct mptcp_data_frag *dfrag, 1454 u32 sent) 1455 { 1456 u64 snd_nxt_new = dfrag->data_seq; 1457 1458 dfrag->already_sent += sent; 1459 1460 msk->snd_burst -= sent; 1461 1462 snd_nxt_new += dfrag->already_sent; 1463 1464 /* snd_nxt_new can be smaller than snd_nxt in case mptcp 1465 * is recovering after a failover. In that event, this re-sends 1466 * old segments. 1467 * 1468 * Thus compute snd_nxt_new candidate based on 1469 * the dfrag->data_seq that was sent and the data 1470 * that has been handed to the subflow for transmission 1471 * and skip update in case it was old dfrag. 1472 */ 1473 if (likely(after64(snd_nxt_new, msk->snd_nxt))) { 1474 msk->bytes_sent += snd_nxt_new - msk->snd_nxt; 1475 WRITE_ONCE(msk->snd_nxt, snd_nxt_new); 1476 } 1477 } 1478 1479 void mptcp_check_and_set_pending(struct sock *sk) 1480 { 1481 if (mptcp_send_head(sk)) { 1482 mptcp_data_lock(sk); 1483 mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING); 1484 mptcp_data_unlock(sk); 1485 } 1486 } 1487 1488 static int __subflow_push_pending(struct sock *sk, struct sock *ssk, 1489 struct mptcp_sendmsg_info *info) 1490 { 1491 struct mptcp_sock *msk = mptcp_sk(sk); 1492 struct mptcp_data_frag *dfrag; 1493 int len, copied = 0, err = 0; 1494 1495 while ((dfrag = mptcp_send_head(sk))) { 1496 info->sent = dfrag->already_sent; 1497 info->limit = dfrag->data_len; 1498 len = dfrag->data_len - dfrag->already_sent; 1499 while (len > 0) { 1500 int ret = 0; 1501 1502 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info); 1503 if (ret <= 0) { 1504 err = copied ? 
: ret; 1505 goto out; 1506 } 1507 1508 info->sent += ret; 1509 copied += ret; 1510 len -= ret; 1511 1512 mptcp_update_post_push(msk, dfrag, ret); 1513 } 1514 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1515 1516 if (msk->snd_burst <= 0 || 1517 !sk_stream_memory_free(ssk) || 1518 !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) { 1519 err = copied; 1520 goto out; 1521 } 1522 mptcp_set_timeout(sk); 1523 } 1524 err = copied; 1525 1526 out: 1527 if (err > 0) 1528 msk->last_data_sent = tcp_jiffies32; 1529 return err; 1530 } 1531 1532 void __mptcp_push_pending(struct sock *sk, unsigned int flags) 1533 { 1534 struct sock *prev_ssk = NULL, *ssk = NULL; 1535 struct mptcp_sock *msk = mptcp_sk(sk); 1536 struct mptcp_sendmsg_info info = { 1537 .flags = flags, 1538 }; 1539 bool do_check_data_fin = false; 1540 int push_count = 1; 1541 1542 while (mptcp_send_head(sk) && (push_count > 0)) { 1543 struct mptcp_subflow_context *subflow; 1544 int ret = 0; 1545 1546 if (mptcp_sched_get_send(msk)) 1547 break; 1548 1549 push_count = 0; 1550 1551 mptcp_for_each_subflow(msk, subflow) { 1552 if (READ_ONCE(subflow->scheduled)) { 1553 mptcp_subflow_set_scheduled(subflow, false); 1554 1555 prev_ssk = ssk; 1556 ssk = mptcp_subflow_tcp_sock(subflow); 1557 if (ssk != prev_ssk) { 1558 /* First check. If the ssk has changed since 1559 * the last round, release prev_ssk 1560 */ 1561 if (prev_ssk) 1562 mptcp_push_release(prev_ssk, &info); 1563 1564 /* Need to lock the new subflow only if different 1565 * from the previous one, otherwise we are still 1566 * helding the relevant lock 1567 */ 1568 lock_sock(ssk); 1569 } 1570 1571 push_count++; 1572 1573 ret = __subflow_push_pending(sk, ssk, &info); 1574 if (ret <= 0) { 1575 if (ret != -EAGAIN || 1576 (1 << ssk->sk_state) & 1577 (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE)) 1578 push_count--; 1579 continue; 1580 } 1581 do_check_data_fin = true; 1582 } 1583 } 1584 } 1585 1586 /* at this point we held the socket lock for the last subflow we used */ 1587 if (ssk) 1588 mptcp_push_release(ssk, &info); 1589 1590 /* ensure the rtx timer is running */ 1591 if (!mptcp_rtx_timer_pending(sk)) 1592 mptcp_reset_rtx_timer(sk); 1593 if (do_check_data_fin) 1594 mptcp_check_send_data_fin(sk); 1595 } 1596 1597 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) 1598 { 1599 struct mptcp_sock *msk = mptcp_sk(sk); 1600 struct mptcp_sendmsg_info info = { 1601 .data_lock_held = true, 1602 }; 1603 bool keep_pushing = true; 1604 struct sock *xmit_ssk; 1605 int copied = 0; 1606 1607 info.flags = 0; 1608 while (mptcp_send_head(sk) && keep_pushing) { 1609 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1610 int ret = 0; 1611 1612 /* check for a different subflow usage only after 1613 * spooling the first chunk of data 1614 */ 1615 if (first) { 1616 mptcp_subflow_set_scheduled(subflow, false); 1617 ret = __subflow_push_pending(sk, ssk, &info); 1618 first = false; 1619 if (ret <= 0) 1620 break; 1621 copied += ret; 1622 continue; 1623 } 1624 1625 if (mptcp_sched_get_send(msk)) 1626 goto out; 1627 1628 if (READ_ONCE(subflow->scheduled)) { 1629 mptcp_subflow_set_scheduled(subflow, false); 1630 ret = __subflow_push_pending(sk, ssk, &info); 1631 if (ret <= 0) 1632 keep_pushing = false; 1633 copied += ret; 1634 } 1635 1636 mptcp_for_each_subflow(msk, subflow) { 1637 if (READ_ONCE(subflow->scheduled)) { 1638 xmit_ssk = mptcp_subflow_tcp_sock(subflow); 1639 if (xmit_ssk != ssk) { 1640 mptcp_subflow_delegate(subflow, 1641 MPTCP_DELEGATE_SEND); 1642 
					keep_pushing = false;
				}
			}
		}
	}

out:
	/* __mptcp_alloc_tx_skb could have released some wmem and we are
	 * not going to flush it via release_sock()
	 */
	if (copied) {
		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
			 info.size_goal);
		if (!mptcp_rtx_timer_pending(sk))
			mptcp_reset_rtx_timer(sk);

		if (msk->snd_data_fin_enable &&
		    msk->snd_nxt + 1 == msk->write_seq)
			mptcp_schedule_work(sk);
	}
}

static int mptcp_disconnect(struct sock *sk, int flags);

static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				  size_t len, int *copied_syn)
{
	unsigned int saved_flags = msg->msg_flags;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;
	int ret;

	/* on flags based fastopen the mptcp is supposed to create the
	 * first subflow right now. Otherwise we are in the defer_connect
	 * path, and the first subflow must be already present.
	 * Since the defer_connect flag is cleared after the first successful
	 * fastopen attempt, no need to check for additional subflow status.
	 */
	if (msg->msg_flags & MSG_FASTOPEN) {
		ssk = __mptcp_nmpc_sk(msk);
		if (IS_ERR(ssk))
			return PTR_ERR(ssk);
	}
	if (!msk->first)
		return -EINVAL;

	ssk = msk->first;

	lock_sock(ssk);
	msg->msg_flags |= MSG_DONTWAIT;
	msk->fastopening = 1;
	ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
	msk->fastopening = 0;
	msg->msg_flags = saved_flags;
	release_sock(ssk);

	/* do the blocking bits of inet_stream_connect outside the ssk socket lock */
	if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
		ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
					    msg->msg_namelen, msg->msg_flags, 1);

		/* Keep the same behaviour of plain TCP: zero the copied bytes in
		 * case of any error, except timeout or signal
		 */
		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
			*copied_syn = 0;
	} else if (ret && ret != -EINPROGRESS) {
		/* The disconnect() op called by tcp_sendmsg_fastopen()/
		 * __inet_stream_connect() can fail, due to a locking check,
		 * see mptcp_disconnect().
		 * Attempt it again outside the problematic scope.
		 */
		if (!mptcp_disconnect(sk, 0)) {
			sk->sk_disconnects++;
			sk->sk_socket->state = SS_UNCONNECTED;
		}
	}
	inet_clear_bit(DEFER_CONNECT, sk);

	return ret;
}

static int do_copy_data_nocache(struct sock *sk, int copy,
				struct iov_iter *from, char *to)
{
	if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from)) {
		return -EFAULT;
	}
	return 0;
}

/* open-code sk_stream_memory_free() plus sent limit computation to
 * avoid indirect calls in fast-path.
 * Called under the msk socket lock, so we can avoid a bunch of ONCE
 * annotations.
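 * The resulting limit is the notsent_lowat value minus the data already
 * queued at the MPTCP level but not yet pushed (write_seq - snd_nxt).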
1740 */ 1741 static u32 mptcp_send_limit(const struct sock *sk) 1742 { 1743 const struct mptcp_sock *msk = mptcp_sk(sk); 1744 u32 limit, not_sent; 1745 1746 if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf)) 1747 return 0; 1748 1749 limit = mptcp_notsent_lowat(sk); 1750 if (limit == UINT_MAX) 1751 return UINT_MAX; 1752 1753 not_sent = msk->write_seq - msk->snd_nxt; 1754 if (not_sent >= limit) 1755 return 0; 1756 1757 return limit - not_sent; 1758 } 1759 1760 static void mptcp_rps_record_subflows(const struct mptcp_sock *msk) 1761 { 1762 struct mptcp_subflow_context *subflow; 1763 1764 if (!rfs_is_needed()) 1765 return; 1766 1767 mptcp_for_each_subflow(msk, subflow) { 1768 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 1769 1770 sock_rps_record_flow(ssk); 1771 } 1772 } 1773 1774 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1775 { 1776 struct mptcp_sock *msk = mptcp_sk(sk); 1777 struct page_frag *pfrag; 1778 size_t copied = 0; 1779 int ret = 0; 1780 long timeo; 1781 1782 /* silently ignore everything else */ 1783 msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_FASTOPEN; 1784 1785 lock_sock(sk); 1786 1787 mptcp_rps_record_subflows(msk); 1788 1789 if (unlikely(inet_test_bit(DEFER_CONNECT, sk) || 1790 msg->msg_flags & MSG_FASTOPEN)) { 1791 int copied_syn = 0; 1792 1793 ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn); 1794 copied += copied_syn; 1795 if (ret == -EINPROGRESS && copied_syn > 0) 1796 goto out; 1797 else if (ret) 1798 goto do_error; 1799 } 1800 1801 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1802 1803 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { 1804 ret = sk_stream_wait_connect(sk, &timeo); 1805 if (ret) 1806 goto do_error; 1807 } 1808 1809 ret = -EPIPE; 1810 if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))) 1811 goto do_error; 1812 1813 pfrag = sk_page_frag(sk); 1814 1815 while (msg_data_left(msg)) { 1816 int total_ts, frag_truesize = 0; 1817 struct mptcp_data_frag *dfrag; 1818 bool dfrag_collapsed; 1819 size_t psize, offset; 1820 u32 copy_limit; 1821 1822 /* ensure fitting the notsent_lowat() constraint */ 1823 copy_limit = mptcp_send_limit(sk); 1824 if (!copy_limit) 1825 goto wait_for_memory; 1826 1827 /* reuse tail pfrag, if possible, or carve a new one from the 1828 * page allocator 1829 */ 1830 dfrag = mptcp_pending_tail(sk); 1831 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); 1832 if (!dfrag_collapsed) { 1833 if (!mptcp_page_frag_refill(sk, pfrag)) 1834 goto wait_for_memory; 1835 1836 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); 1837 frag_truesize = dfrag->overhead; 1838 } 1839 1840 /* we do not bound vs wspace, to allow a single packet. 
		 * memory accounting will prevent excessive memory usage
		 * anyway
		 */
		offset = dfrag->offset + dfrag->data_len;
		psize = pfrag->size - offset;
		psize = min_t(size_t, psize, msg_data_left(msg));
		psize = min_t(size_t, psize, copy_limit);
		total_ts = psize + frag_truesize;

		if (!sk_wmem_schedule(sk, total_ts))
			goto wait_for_memory;

		ret = do_copy_data_nocache(sk, psize, &msg->msg_iter,
					   page_address(dfrag->page) + offset);
		if (ret)
			goto do_error;

		/* data successfully copied into the write queue */
		sk_forward_alloc_add(sk, -total_ts);
		copied += psize;
		dfrag->data_len += psize;
		frag_truesize += psize;
		pfrag->offset += frag_truesize;
		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);

		/* charge data on mptcp pending queue to the msk socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk_wmem_queued_add(sk, frag_truesize);
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			if (!msk->first_pending)
				WRITE_ONCE(msk->first_pending, dfrag);
		}
		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
			 !dfrag_collapsed);

		continue;

wait_for_memory:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		__mptcp_push_pending(sk, msg->msg_flags);
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto do_error;
	}

	if (copied)
		__mptcp_push_pending(sk, msg->msg_flags);

out:
	release_sock(sk);
	return copied;

do_error:
	if (copied)
		goto out;

	copied = sk_stream_error(sk, msg->msg_flags, ret);
	goto out;
}

static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);

static int __mptcp_recvmsg_mskq(struct sock *sk,
				struct msghdr *msg,
				size_t len, int flags,
				struct scm_timestamping_internal *tss,
				int *cmsg_flags)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *skb, *tmp;
	int copied = 0;

	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_msg(skb, offset, msg, count);
			if (unlikely(err < 0)) {
				if (!copied)
					return err;
				break;
			}
		}

		if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
			tcp_update_recv_tstamps(skb, tss);
			*cmsg_flags |= MPTCP_CMSG_TS;
		}

		copied += count;

		if (count < data_len) {
			if (!(flags & MSG_PEEK)) {
				MPTCP_SKB_CB(skb)->offset += count;
				MPTCP_SKB_CB(skb)->map_seq += count;
				msk->bytes_consumed += count;
			}
			break;
		}

		if (!(flags & MSG_PEEK)) {
			/* avoid the indirect call, we know the destructor is sock_rfree */
			skb->destructor = NULL;
			atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
			sk_mem_uncharge(sk, skb->truesize);
			__skb_unlink(skb, &sk->sk_receive_queue);
			__kfree_skb(skb);
			msk->bytes_consumed += count;
		}

		if (copied >= len)
			break;
	}

	mptcp_rcv_space_adjust(msk, copied);
	return copied;
}

/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
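 * Copied bytes are accumulated in msk->rcvq_space.copied and the receive
 * buffer is grown at most once per RTT.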
1967 * 1968 * Only difference: Use highest rtt estimate of the subflows in use. 1969 */ 1970 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) 1971 { 1972 struct mptcp_subflow_context *subflow; 1973 struct sock *sk = (struct sock *)msk; 1974 u8 scaling_ratio = U8_MAX; 1975 u32 time, advmss = 1; 1976 u64 rtt_us, mstamp; 1977 1978 msk_owned_by_me(msk); 1979 1980 if (copied <= 0) 1981 return; 1982 1983 if (!msk->rcvspace_init) 1984 mptcp_rcv_space_init(msk, msk->first); 1985 1986 msk->rcvq_space.copied += copied; 1987 1988 mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); 1989 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); 1990 1991 rtt_us = msk->rcvq_space.rtt_us; 1992 if (rtt_us && time < (rtt_us >> 3)) 1993 return; 1994 1995 rtt_us = 0; 1996 mptcp_for_each_subflow(msk, subflow) { 1997 const struct tcp_sock *tp; 1998 u64 sf_rtt_us; 1999 u32 sf_advmss; 2000 2001 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); 2002 2003 sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); 2004 sf_advmss = READ_ONCE(tp->advmss); 2005 2006 rtt_us = max(sf_rtt_us, rtt_us); 2007 advmss = max(sf_advmss, advmss); 2008 scaling_ratio = min(tp->scaling_ratio, scaling_ratio); 2009 } 2010 2011 msk->rcvq_space.rtt_us = rtt_us; 2012 msk->scaling_ratio = scaling_ratio; 2013 if (time < (rtt_us >> 3) || rtt_us == 0) 2014 return; 2015 2016 if (msk->rcvq_space.copied <= msk->rcvq_space.space) 2017 goto new_measure; 2018 2019 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && 2020 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 2021 u64 rcvwin, grow; 2022 int rcvbuf; 2023 2024 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; 2025 2026 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); 2027 2028 do_div(grow, msk->rcvq_space.space); 2029 rcvwin += (grow << 1); 2030 2031 rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin), 2032 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); 2033 2034 if (rcvbuf > sk->sk_rcvbuf) { 2035 u32 window_clamp; 2036 2037 window_clamp = mptcp_win_from_space(sk, rcvbuf); 2038 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 2039 2040 /* Make subflows follow along. If we do not do this, we 2041 * get drops at subflow level if skbs can't be moved to 2042 * the mptcp rx queue fast enough (announced rcv_win can 2043 * exceed ssk->sk_rcvbuf). 
2044 */ 2045 mptcp_for_each_subflow(msk, subflow) { 2046 struct sock *ssk; 2047 bool slow; 2048 2049 ssk = mptcp_subflow_tcp_sock(subflow); 2050 slow = lock_sock_fast(ssk); 2051 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); 2052 WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp); 2053 if (tcp_can_send_ack(ssk)) 2054 tcp_cleanup_rbuf(ssk, 1); 2055 unlock_sock_fast(ssk, slow); 2056 } 2057 } 2058 } 2059 2060 msk->rcvq_space.space = msk->rcvq_space.copied; 2061 new_measure: 2062 msk->rcvq_space.copied = 0; 2063 msk->rcvq_space.time = mstamp; 2064 } 2065 2066 static struct mptcp_subflow_context * 2067 __mptcp_first_ready_from(struct mptcp_sock *msk, 2068 struct mptcp_subflow_context *subflow) 2069 { 2070 struct mptcp_subflow_context *start_subflow = subflow; 2071 2072 while (!READ_ONCE(subflow->data_avail)) { 2073 subflow = mptcp_next_subflow(msk, subflow); 2074 if (subflow == start_subflow) 2075 return NULL; 2076 } 2077 return subflow; 2078 } 2079 2080 static bool __mptcp_move_skbs(struct sock *sk) 2081 { 2082 struct mptcp_subflow_context *subflow; 2083 struct mptcp_sock *msk = mptcp_sk(sk); 2084 bool ret = false; 2085 2086 if (list_empty(&msk->conn_list)) 2087 return false; 2088 2089 /* verify we can move any data from the subflow, eventually updating */ 2090 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) 2091 mptcp_for_each_subflow(msk, subflow) 2092 __mptcp_rcvbuf_update(sk, subflow->tcp_sock); 2093 2094 subflow = list_first_entry(&msk->conn_list, 2095 struct mptcp_subflow_context, node); 2096 for (;;) { 2097 struct sock *ssk; 2098 bool slowpath; 2099 2100 /* 2101 * As an optimization avoid traversing the subflows list 2102 * and ev. acquiring the subflow socket lock before baling out 2103 */ 2104 if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) 2105 break; 2106 2107 subflow = __mptcp_first_ready_from(msk, subflow); 2108 if (!subflow) 2109 break; 2110 2111 ssk = mptcp_subflow_tcp_sock(subflow); 2112 slowpath = lock_sock_fast(ssk); 2113 ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret; 2114 if (unlikely(ssk->sk_err)) 2115 __mptcp_error_report(sk); 2116 unlock_sock_fast(ssk, slowpath); 2117 2118 subflow = mptcp_next_subflow(msk, subflow); 2119 } 2120 2121 __mptcp_ofo_queue(msk); 2122 if (ret) 2123 mptcp_check_data_fin((struct sock *)msk); 2124 return ret; 2125 } 2126 2127 static unsigned int mptcp_inq_hint(const struct sock *sk) 2128 { 2129 const struct mptcp_sock *msk = mptcp_sk(sk); 2130 const struct sk_buff *skb; 2131 2132 skb = skb_peek(&sk->sk_receive_queue); 2133 if (skb) { 2134 u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; 2135 2136 if (hint_val >= INT_MAX) 2137 return INT_MAX; 2138 2139 return (unsigned int)hint_val; 2140 } 2141 2142 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 2143 return 1; 2144 2145 return 0; 2146 } 2147 2148 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2149 int flags, int *addr_len) 2150 { 2151 struct mptcp_sock *msk = mptcp_sk(sk); 2152 struct scm_timestamping_internal tss; 2153 int copied = 0, cmsg_flags = 0; 2154 int target; 2155 long timeo; 2156 2157 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */ 2158 if (unlikely(flags & MSG_ERRQUEUE)) 2159 return inet_recv_error(sk, msg, len, addr_len); 2160 2161 lock_sock(sk); 2162 if (unlikely(sk->sk_state == TCP_LISTEN)) { 2163 copied = -ENOTCONN; 2164 goto out_err; 2165 } 2166 2167 mptcp_rps_record_subflows(msk); 2168 2169 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2170 2171 len = min_t(size_t, len, INT_MAX); 2172 target = 
sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2173 2174 if (unlikely(msk->recvmsg_inq)) 2175 cmsg_flags = MPTCP_CMSG_INQ; 2176 2177 while (copied < len) { 2178 int err, bytes_read; 2179 2180 bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags); 2181 if (unlikely(bytes_read < 0)) { 2182 if (!copied) 2183 copied = bytes_read; 2184 goto out_err; 2185 } 2186 2187 copied += bytes_read; 2188 2189 if (skb_queue_empty(&sk->sk_receive_queue) && __mptcp_move_skbs(sk)) 2190 continue; 2191 2192 /* only the MPTCP socket status is relevant here. The exit 2193 * conditions mirror closely tcp_recvmsg() 2194 */ 2195 if (copied >= target) 2196 break; 2197 2198 if (copied) { 2199 if (sk->sk_err || 2200 sk->sk_state == TCP_CLOSE || 2201 (sk->sk_shutdown & RCV_SHUTDOWN) || 2202 !timeo || 2203 signal_pending(current)) 2204 break; 2205 } else { 2206 if (sk->sk_err) { 2207 copied = sock_error(sk); 2208 break; 2209 } 2210 2211 if (sk->sk_shutdown & RCV_SHUTDOWN) { 2212 /* race breaker: the shutdown could be after the 2213 * previous receive queue check 2214 */ 2215 if (__mptcp_move_skbs(sk)) 2216 continue; 2217 break; 2218 } 2219 2220 if (sk->sk_state == TCP_CLOSE) { 2221 copied = -ENOTCONN; 2222 break; 2223 } 2224 2225 if (!timeo) { 2226 copied = -EAGAIN; 2227 break; 2228 } 2229 2230 if (signal_pending(current)) { 2231 copied = sock_intr_errno(timeo); 2232 break; 2233 } 2234 } 2235 2236 pr_debug("block timeout %ld\n", timeo); 2237 mptcp_cleanup_rbuf(msk, copied); 2238 err = sk_wait_data(sk, &timeo, NULL); 2239 if (err < 0) { 2240 err = copied ? : err; 2241 goto out_err; 2242 } 2243 } 2244 2245 mptcp_cleanup_rbuf(msk, copied); 2246 2247 out_err: 2248 if (cmsg_flags && copied >= 0) { 2249 if (cmsg_flags & MPTCP_CMSG_TS) 2250 tcp_recv_timestamp(msg, sk, &tss); 2251 2252 if (cmsg_flags & MPTCP_CMSG_INQ) { 2253 unsigned int inq = mptcp_inq_hint(sk); 2254 2255 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); 2256 } 2257 } 2258 2259 pr_debug("msk=%p rx queue empty=%d copied=%d\n", 2260 msk, skb_queue_empty(&sk->sk_receive_queue), copied); 2261 2262 release_sock(sk); 2263 return copied; 2264 } 2265 2266 static void mptcp_retransmit_timer(struct timer_list *t) 2267 { 2268 struct inet_connection_sock *icsk = timer_container_of(icsk, t, 2269 icsk_retransmit_timer); 2270 struct sock *sk = &icsk->icsk_inet.sk; 2271 struct mptcp_sock *msk = mptcp_sk(sk); 2272 2273 bh_lock_sock(sk); 2274 if (!sock_owned_by_user(sk)) { 2275 /* we need a process context to retransmit */ 2276 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) 2277 mptcp_schedule_work(sk); 2278 } else { 2279 /* delegate our work to tcp_release_cb() */ 2280 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); 2281 } 2282 bh_unlock_sock(sk); 2283 sock_put(sk); 2284 } 2285 2286 static void mptcp_tout_timer(struct timer_list *t) 2287 { 2288 struct sock *sk = timer_container_of(sk, t, sk_timer); 2289 2290 mptcp_schedule_work(sk); 2291 sock_put(sk); 2292 } 2293 2294 /* Find an idle subflow. Return NULL if there is unacked data at tcp 2295 * level. 2296 * 2297 * A backup subflow is returned only if that is the only kind available. 
2298 */ 2299 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) 2300 { 2301 struct sock *backup = NULL, *pick = NULL; 2302 struct mptcp_subflow_context *subflow; 2303 int min_stale_count = INT_MAX; 2304 2305 mptcp_for_each_subflow(msk, subflow) { 2306 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2307 2308 if (!__mptcp_subflow_active(subflow)) 2309 continue; 2310 2311 /* still data outstanding at TCP level? skip this */ 2312 if (!tcp_rtx_and_write_queues_empty(ssk)) { 2313 mptcp_pm_subflow_chk_stale(msk, ssk); 2314 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); 2315 continue; 2316 } 2317 2318 if (subflow->backup || subflow->request_bkup) { 2319 if (!backup) 2320 backup = ssk; 2321 continue; 2322 } 2323 2324 if (!pick) 2325 pick = ssk; 2326 } 2327 2328 if (pick) 2329 return pick; 2330 2331 /* use backup only if there are no progresses anywhere */ 2332 return min_stale_count > 1 ? backup : NULL; 2333 } 2334 2335 bool __mptcp_retransmit_pending_data(struct sock *sk) 2336 { 2337 struct mptcp_data_frag *cur, *rtx_head; 2338 struct mptcp_sock *msk = mptcp_sk(sk); 2339 2340 if (__mptcp_check_fallback(msk)) 2341 return false; 2342 2343 /* the closing socket has some data untransmitted and/or unacked: 2344 * some data in the mptcp rtx queue has not really xmitted yet. 2345 * keep it simple and re-inject the whole mptcp level rtx queue 2346 */ 2347 mptcp_data_lock(sk); 2348 __mptcp_clean_una_wakeup(sk); 2349 rtx_head = mptcp_rtx_head(sk); 2350 if (!rtx_head) { 2351 mptcp_data_unlock(sk); 2352 return false; 2353 } 2354 2355 msk->recovery_snd_nxt = msk->snd_nxt; 2356 msk->recovery = true; 2357 mptcp_data_unlock(sk); 2358 2359 msk->first_pending = rtx_head; 2360 msk->snd_burst = 0; 2361 2362 /* be sure to clear the "sent status" on all re-injected fragments */ 2363 list_for_each_entry(cur, &msk->rtx_queue, list) { 2364 if (!cur->already_sent) 2365 break; 2366 cur->already_sent = 0; 2367 } 2368 2369 return true; 2370 } 2371 2372 /* flags for __mptcp_close_ssk() */ 2373 #define MPTCP_CF_PUSH BIT(1) 2374 #define MPTCP_CF_FASTCLOSE BIT(2) 2375 2376 /* be sure to send a reset only if the caller asked for it, also 2377 * clean completely the subflow status when the subflow reaches 2378 * TCP_CLOSE state 2379 */ 2380 static void __mptcp_subflow_disconnect(struct sock *ssk, 2381 struct mptcp_subflow_context *subflow, 2382 unsigned int flags) 2383 { 2384 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || 2385 (flags & MPTCP_CF_FASTCLOSE)) { 2386 /* The MPTCP code never wait on the subflow sockets, TCP-level 2387 * disconnect should never fail 2388 */ 2389 WARN_ON_ONCE(tcp_disconnect(ssk, 0)); 2390 mptcp_subflow_ctx_reset(subflow); 2391 } else { 2392 tcp_shutdown(ssk, SEND_SHUTDOWN); 2393 } 2394 } 2395 2396 /* subflow sockets can be either outgoing (connect) or incoming 2397 * (accept). 2398 * 2399 * Outgoing subflows use in-kernel sockets. 2400 * Incoming subflows do not have their own 'struct socket' allocated, 2401 * so we need to use tcp_close() after detaching them from the mptcp 2402 * parent socket. 2403 */ 2404 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2405 struct mptcp_subflow_context *subflow, 2406 unsigned int flags) 2407 { 2408 struct mptcp_sock *msk = mptcp_sk(sk); 2409 bool dispose_it, need_push = false; 2410 2411 /* If the first subflow moved to a close state before accept, e.g. 
due 2412 * to an incoming reset or listener shutdown, the subflow socket is 2413 * already deleted by inet_child_forget() and the mptcp socket can't 2414 * survive too. 2415 */ 2416 if (msk->in_accept_queue && msk->first == ssk && 2417 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) { 2418 /* ensure later check in mptcp_worker() will dispose the msk */ 2419 sock_set_flag(sk, SOCK_DEAD); 2420 mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1)); 2421 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 2422 mptcp_subflow_drop_ctx(ssk); 2423 goto out_release; 2424 } 2425 2426 dispose_it = msk->free_first || ssk != msk->first; 2427 if (dispose_it) 2428 list_del(&subflow->node); 2429 2430 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 2431 2432 if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) { 2433 /* be sure to force the tcp_close path 2434 * to generate the egress reset 2435 */ 2436 ssk->sk_lingertime = 0; 2437 sock_set_flag(ssk, SOCK_LINGER); 2438 subflow->send_fastclose = 1; 2439 } 2440 2441 need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); 2442 if (!dispose_it) { 2443 __mptcp_subflow_disconnect(ssk, subflow, flags); 2444 release_sock(ssk); 2445 2446 goto out; 2447 } 2448 2449 subflow->disposable = 1; 2450 2451 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops 2452 * the ssk has been already destroyed, we just need to release the 2453 * reference owned by msk; 2454 */ 2455 if (!inet_csk(ssk)->icsk_ulp_ops) { 2456 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); 2457 kfree_rcu(subflow, rcu); 2458 } else { 2459 /* otherwise tcp will dispose of the ssk and subflow ctx */ 2460 __tcp_close(ssk, 0); 2461 2462 /* close acquired an extra ref */ 2463 __sock_put(ssk); 2464 } 2465 2466 out_release: 2467 __mptcp_subflow_error_report(sk, ssk); 2468 release_sock(ssk); 2469 2470 sock_put(ssk); 2471 2472 if (ssk == msk->first) 2473 WRITE_ONCE(msk->first, NULL); 2474 2475 out: 2476 __mptcp_sync_sndbuf(sk); 2477 if (need_push) 2478 __mptcp_push_pending(sk, 0); 2479 2480 /* Catch every 'all subflows closed' scenario, including peers silently 2481 * closing them, e.g. due to timeout. 2482 * For established sockets, allow an additional timeout before closing, 2483 * as the protocol can still create more subflows. 
2484 */ 2485 if (list_is_singular(&msk->conn_list) && msk->first && 2486 inet_sk_state_load(msk->first) == TCP_CLOSE) { 2487 if (sk->sk_state != TCP_ESTABLISHED || 2488 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { 2489 mptcp_set_state(sk, TCP_CLOSE); 2490 mptcp_close_wake_up(sk); 2491 } else { 2492 mptcp_start_tout_timer(sk); 2493 } 2494 } 2495 } 2496 2497 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2498 struct mptcp_subflow_context *subflow) 2499 { 2500 /* The first subflow can already be closed and still in the list */ 2501 if (subflow->close_event_done) 2502 return; 2503 2504 subflow->close_event_done = true; 2505 2506 if (sk->sk_state == TCP_ESTABLISHED) 2507 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); 2508 2509 /* subflow aborted before reaching the fully_established status 2510 * attempt the creation of the next subflow 2511 */ 2512 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow); 2513 2514 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); 2515 } 2516 2517 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) 2518 { 2519 return 0; 2520 } 2521 2522 static void __mptcp_close_subflow(struct sock *sk) 2523 { 2524 struct mptcp_subflow_context *subflow, *tmp; 2525 struct mptcp_sock *msk = mptcp_sk(sk); 2526 2527 might_sleep(); 2528 2529 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 2530 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2531 int ssk_state = inet_sk_state_load(ssk); 2532 2533 if (ssk_state != TCP_CLOSE && 2534 (ssk_state != TCP_CLOSE_WAIT || 2535 inet_sk_state_load(sk) != TCP_ESTABLISHED)) 2536 continue; 2537 2538 /* 'subflow_data_ready' will re-sched once rx queue is empty */ 2539 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) 2540 continue; 2541 2542 mptcp_close_ssk(sk, ssk, subflow); 2543 } 2544 2545 } 2546 2547 static bool mptcp_close_tout_expired(const struct sock *sk) 2548 { 2549 if (!inet_csk(sk)->icsk_mtup.probe_timestamp || 2550 sk->sk_state == TCP_CLOSE) 2551 return false; 2552 2553 return time_after32(tcp_jiffies32, 2554 inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk)); 2555 } 2556 2557 static void mptcp_check_fastclose(struct mptcp_sock *msk) 2558 { 2559 struct mptcp_subflow_context *subflow, *tmp; 2560 struct sock *sk = (struct sock *)msk; 2561 2562 if (likely(!READ_ONCE(msk->rcv_fastclose))) 2563 return; 2564 2565 mptcp_token_destroy(msk); 2566 2567 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 2568 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); 2569 bool slow; 2570 2571 slow = lock_sock_fast(tcp_sk); 2572 if (tcp_sk->sk_state != TCP_CLOSE) { 2573 mptcp_send_active_reset_reason(tcp_sk); 2574 tcp_set_state(tcp_sk, TCP_CLOSE); 2575 } 2576 unlock_sock_fast(tcp_sk, slow); 2577 } 2578 2579 /* Mirror the tcp_reset() error propagation */ 2580 switch (sk->sk_state) { 2581 case TCP_SYN_SENT: 2582 WRITE_ONCE(sk->sk_err, ECONNREFUSED); 2583 break; 2584 case TCP_CLOSE_WAIT: 2585 WRITE_ONCE(sk->sk_err, EPIPE); 2586 break; 2587 case TCP_CLOSE: 2588 return; 2589 default: 2590 WRITE_ONCE(sk->sk_err, ECONNRESET); 2591 } 2592 2593 mptcp_set_state(sk, TCP_CLOSE); 2594 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 2595 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 2596 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); 2597 2598 /* the calling mptcp_worker will properly destroy the socket */ 2599 if (sock_flag(sk, SOCK_DEAD)) 2600 return; 2601 2602 sk->sk_state_change(sk); 2603 sk_error_report(sk); 2604 } 2605 2606 static void __mptcp_retrans(struct sock *sk) 2607 { 2608 
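/* MPTCP-level retransmission: re-inject the head of the rtx queue on a
 * scheduler-selected subflow. Reached from the worker and from the
 * release callback when the MPTCP retransmit timer fires.
 */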
struct mptcp_sendmsg_info info = { .data_lock_held = true, }; 2609 struct mptcp_sock *msk = mptcp_sk(sk); 2610 struct mptcp_subflow_context *subflow; 2611 struct mptcp_data_frag *dfrag; 2612 struct sock *ssk; 2613 int ret, err; 2614 u16 len = 0; 2615 2616 mptcp_clean_una_wakeup(sk); 2617 2618 /* first check ssk: need to kick "stale" logic */ 2619 err = mptcp_sched_get_retrans(msk); 2620 dfrag = mptcp_rtx_head(sk); 2621 if (!dfrag) { 2622 if (mptcp_data_fin_enabled(msk)) { 2623 struct inet_connection_sock *icsk = inet_csk(sk); 2624 2625 WRITE_ONCE(icsk->icsk_retransmits, 2626 icsk->icsk_retransmits + 1); 2627 mptcp_set_datafin_timeout(sk); 2628 mptcp_send_ack(msk); 2629 2630 goto reset_timer; 2631 } 2632 2633 if (!mptcp_send_head(sk)) 2634 return; 2635 2636 goto reset_timer; 2637 } 2638 2639 if (err) 2640 goto reset_timer; 2641 2642 mptcp_for_each_subflow(msk, subflow) { 2643 if (READ_ONCE(subflow->scheduled)) { 2644 u16 copied = 0; 2645 2646 mptcp_subflow_set_scheduled(subflow, false); 2647 2648 ssk = mptcp_subflow_tcp_sock(subflow); 2649 2650 lock_sock(ssk); 2651 2652 /* limit retransmission to the bytes already sent on some subflows */ 2653 info.sent = 0; 2654 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : 2655 dfrag->already_sent; 2656 2657 /* 2658 * make the whole retrans decision, xmit, disallow 2659 * fallback atomic 2660 */ 2661 spin_lock_bh(&msk->fallback_lock); 2662 if (__mptcp_check_fallback(msk)) { 2663 spin_unlock_bh(&msk->fallback_lock); 2664 release_sock(ssk); 2665 return; 2666 } 2667 2668 while (info.sent < info.limit) { 2669 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); 2670 if (ret <= 0) 2671 break; 2672 2673 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); 2674 copied += ret; 2675 info.sent += ret; 2676 } 2677 if (copied) { 2678 len = max(copied, len); 2679 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, 2680 info.size_goal); 2681 msk->allow_infinite_fallback = false; 2682 } 2683 spin_unlock_bh(&msk->fallback_lock); 2684 2685 release_sock(ssk); 2686 } 2687 } 2688 2689 msk->bytes_retrans += len; 2690 dfrag->already_sent = max(dfrag->already_sent, len); 2691 2692 reset_timer: 2693 mptcp_check_and_set_pending(sk); 2694 2695 if (!mptcp_rtx_timer_pending(sk)) 2696 mptcp_reset_rtx_timer(sk); 2697 } 2698 2699 /* schedule the timeout timer for the relevant event: either close timeout 2700 * or mp_fail timeout. The close timeout takes precedence on the mp_fail one 2701 */ 2702 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) 2703 { 2704 struct sock *sk = (struct sock *)msk; 2705 unsigned long timeout, close_timeout; 2706 2707 if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) 2708 return; 2709 2710 close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp - 2711 tcp_jiffies32 + jiffies + mptcp_close_timeout(sk); 2712 2713 /* the close timeout takes precedence on the fail one, and here at least one of 2714 * them is active 2715 */ 2716 timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? 
close_timeout : fail_tout; 2717 2718 sk_reset_timer(sk, &sk->sk_timer, timeout); 2719 } 2720 2721 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) 2722 { 2723 struct sock *ssk = msk->first; 2724 bool slow; 2725 2726 if (!ssk) 2727 return; 2728 2729 pr_debug("MP_FAIL doesn't respond, reset the subflow\n"); 2730 2731 slow = lock_sock_fast(ssk); 2732 mptcp_subflow_reset(ssk); 2733 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); 2734 unlock_sock_fast(ssk, slow); 2735 } 2736 2737 static void mptcp_do_fastclose(struct sock *sk) 2738 { 2739 struct mptcp_subflow_context *subflow, *tmp; 2740 struct mptcp_sock *msk = mptcp_sk(sk); 2741 2742 mptcp_set_state(sk, TCP_CLOSE); 2743 mptcp_for_each_subflow_safe(msk, subflow, tmp) 2744 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), 2745 subflow, MPTCP_CF_FASTCLOSE); 2746 } 2747 2748 static void mptcp_worker(struct work_struct *work) 2749 { 2750 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); 2751 struct sock *sk = (struct sock *)msk; 2752 unsigned long fail_tout; 2753 int state; 2754 2755 lock_sock(sk); 2756 state = sk->sk_state; 2757 if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN))) 2758 goto unlock; 2759 2760 mptcp_check_fastclose(msk); 2761 2762 mptcp_pm_worker(msk); 2763 2764 mptcp_check_send_data_fin(sk); 2765 mptcp_check_data_fin_ack(sk); 2766 mptcp_check_data_fin(sk); 2767 2768 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) 2769 __mptcp_close_subflow(sk); 2770 2771 if (mptcp_close_tout_expired(sk)) { 2772 mptcp_do_fastclose(sk); 2773 mptcp_close_wake_up(sk); 2774 } 2775 2776 if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) { 2777 __mptcp_destroy_sock(sk); 2778 goto unlock; 2779 } 2780 2781 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) 2782 __mptcp_retrans(sk); 2783 2784 fail_tout = msk->first ? 
READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; 2785 if (fail_tout && time_after(jiffies, fail_tout)) 2786 mptcp_mp_fail_no_response(msk); 2787 2788 unlock: 2789 release_sock(sk); 2790 sock_put(sk); 2791 } 2792 2793 static void __mptcp_init_sock(struct sock *sk) 2794 { 2795 struct mptcp_sock *msk = mptcp_sk(sk); 2796 2797 INIT_LIST_HEAD(&msk->conn_list); 2798 INIT_LIST_HEAD(&msk->join_list); 2799 INIT_LIST_HEAD(&msk->rtx_queue); 2800 INIT_WORK(&msk->work, mptcp_worker); 2801 msk->out_of_order_queue = RB_ROOT; 2802 msk->first_pending = NULL; 2803 msk->timer_ival = TCP_RTO_MIN; 2804 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; 2805 2806 WRITE_ONCE(msk->first, NULL); 2807 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; 2808 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); 2809 msk->allow_infinite_fallback = true; 2810 msk->allow_subflows = true; 2811 msk->recovery = false; 2812 msk->subflow_id = 1; 2813 msk->last_data_sent = tcp_jiffies32; 2814 msk->last_data_recv = tcp_jiffies32; 2815 msk->last_ack_recv = tcp_jiffies32; 2816 2817 mptcp_pm_data_init(msk); 2818 spin_lock_init(&msk->fallback_lock); 2819 2820 /* re-use the csk retrans timer for MPTCP-level retrans */ 2821 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); 2822 timer_setup(&sk->sk_timer, mptcp_tout_timer, 0); 2823 } 2824 2825 static void mptcp_ca_reset(struct sock *sk) 2826 { 2827 struct inet_connection_sock *icsk = inet_csk(sk); 2828 2829 tcp_assign_congestion_control(sk); 2830 strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name, 2831 sizeof(mptcp_sk(sk)->ca_name)); 2832 2833 /* no need to keep a reference to the ops, the name will suffice */ 2834 tcp_cleanup_congestion_control(sk); 2835 icsk->icsk_ca_ops = NULL; 2836 } 2837 2838 static int mptcp_init_sock(struct sock *sk) 2839 { 2840 struct net *net = sock_net(sk); 2841 int ret; 2842 2843 __mptcp_init_sock(sk); 2844 2845 if (!mptcp_is_enabled(net)) 2846 return -ENOPROTOOPT; 2847 2848 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) 2849 return -ENOMEM; 2850 2851 rcu_read_lock(); 2852 ret = mptcp_init_sched(mptcp_sk(sk), 2853 mptcp_sched_find(mptcp_get_scheduler(net))); 2854 rcu_read_unlock(); 2855 if (ret) 2856 return ret; 2857 2858 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); 2859 2860 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will 2861 * propagate the correct value 2862 */ 2863 mptcp_ca_reset(sk); 2864 2865 sk_sockets_allocated_inc(sk); 2866 sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]); 2867 sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]); 2868 2869 return 0; 2870 } 2871 2872 static void __mptcp_clear_xmit(struct sock *sk) 2873 { 2874 struct mptcp_sock *msk = mptcp_sk(sk); 2875 struct mptcp_data_frag *dtmp, *dfrag; 2876 2877 WRITE_ONCE(msk->first_pending, NULL); 2878 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) 2879 dfrag_clear(sk, dfrag); 2880 } 2881 2882 void mptcp_cancel_work(struct sock *sk) 2883 { 2884 struct mptcp_sock *msk = mptcp_sk(sk); 2885 2886 if (cancel_work_sync(&msk->work)) 2887 __sock_put(sk); 2888 } 2889 2890 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) 2891 { 2892 lock_sock(ssk); 2893 2894 switch (ssk->sk_state) { 2895 case TCP_LISTEN: 2896 if (!(how & RCV_SHUTDOWN)) 2897 break; 2898 fallthrough; 2899 case TCP_SYN_SENT: 2900 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); 2901 break; 2902 default: 2903 if (__mptcp_check_fallback(mptcp_sk(sk))) { 2904 pr_debug("Fallback\n"); 2905 
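/* after fallback a single subflow carries plain TCP data: shutting
 * down the msk boils down to shutting down that subflow
 */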
ssk->sk_shutdown |= how; 2906 tcp_shutdown(ssk, how); 2907 2908 /* simulate the data_fin ack reception to let the state 2909 * machine move forward 2910 */ 2911 WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt); 2912 mptcp_schedule_work(sk); 2913 } else { 2914 pr_debug("Sending DATA_FIN on subflow %p\n", ssk); 2915 tcp_send_ack(ssk); 2916 if (!mptcp_rtx_timer_pending(sk)) 2917 mptcp_reset_rtx_timer(sk); 2918 } 2919 break; 2920 } 2921 2922 release_sock(ssk); 2923 } 2924 2925 void mptcp_set_state(struct sock *sk, int state) 2926 { 2927 int oldstate = sk->sk_state; 2928 2929 switch (state) { 2930 case TCP_ESTABLISHED: 2931 if (oldstate != TCP_ESTABLISHED) 2932 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); 2933 break; 2934 case TCP_CLOSE_WAIT: 2935 /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state: 2936 * MPTCP "accepted" sockets will be created later on. So no 2937 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT. 2938 */ 2939 break; 2940 default: 2941 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) 2942 MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); 2943 } 2944 2945 inet_sk_state_store(sk, state); 2946 } 2947 2948 static const unsigned char new_state[16] = { 2949 /* current state: new state: action: */ 2950 [0 /* (Invalid) */] = TCP_CLOSE, 2951 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2952 [TCP_SYN_SENT] = TCP_CLOSE, 2953 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2954 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 2955 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 2956 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ 2957 [TCP_CLOSE] = TCP_CLOSE, 2958 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 2959 [TCP_LAST_ACK] = TCP_LAST_ACK, 2960 [TCP_LISTEN] = TCP_CLOSE, 2961 [TCP_CLOSING] = TCP_CLOSING, 2962 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! 
*/ 2963 }; 2964 2965 static int mptcp_close_state(struct sock *sk) 2966 { 2967 int next = (int)new_state[sk->sk_state]; 2968 int ns = next & TCP_STATE_MASK; 2969 2970 mptcp_set_state(sk, ns); 2971 2972 return next & TCP_ACTION_FIN; 2973 } 2974 2975 static void mptcp_check_send_data_fin(struct sock *sk) 2976 { 2977 struct mptcp_subflow_context *subflow; 2978 struct mptcp_sock *msk = mptcp_sk(sk); 2979 2980 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n", 2981 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), 2982 msk->snd_nxt, msk->write_seq); 2983 2984 /* we still need to enqueue subflows or not really shutting down, 2985 * skip this 2986 */ 2987 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || 2988 mptcp_send_head(sk)) 2989 return; 2990 2991 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 2992 2993 mptcp_for_each_subflow(msk, subflow) { 2994 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); 2995 2996 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN); 2997 } 2998 } 2999 3000 static void __mptcp_wr_shutdown(struct sock *sk) 3001 { 3002 struct mptcp_sock *msk = mptcp_sk(sk); 3003 3004 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n", 3005 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, 3006 !!mptcp_send_head(sk)); 3007 3008 /* will be ignored by fallback sockets */ 3009 WRITE_ONCE(msk->write_seq, msk->write_seq + 1); 3010 WRITE_ONCE(msk->snd_data_fin_enable, 1); 3011 3012 mptcp_check_send_data_fin(sk); 3013 } 3014 3015 static void __mptcp_destroy_sock(struct sock *sk) 3016 { 3017 struct mptcp_sock *msk = mptcp_sk(sk); 3018 3019 pr_debug("msk=%p\n", msk); 3020 3021 might_sleep(); 3022 3023 mptcp_stop_rtx_timer(sk); 3024 sk_stop_timer(sk, &sk->sk_timer); 3025 msk->pm.status = 0; 3026 mptcp_release_sched(msk); 3027 3028 sk->sk_prot->destroy(sk); 3029 3030 sk_stream_kill_queues(sk); 3031 xfrm_sk_free_policy(sk); 3032 3033 sock_put(sk); 3034 } 3035 3036 void __mptcp_unaccepted_force_close(struct sock *sk) 3037 { 3038 sock_set_flag(sk, SOCK_DEAD); 3039 mptcp_do_fastclose(sk); 3040 __mptcp_destroy_sock(sk); 3041 } 3042 3043 static __poll_t mptcp_check_readable(struct sock *sk) 3044 { 3045 return mptcp_epollin_ready(sk) ? 
EPOLLIN | EPOLLRDNORM : 0; 3046 } 3047 3048 static void mptcp_check_listen_stop(struct sock *sk) 3049 { 3050 struct sock *ssk; 3051 3052 if (inet_sk_state_load(sk) != TCP_LISTEN) 3053 return; 3054 3055 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3056 ssk = mptcp_sk(sk)->first; 3057 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN)) 3058 return; 3059 3060 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 3061 tcp_set_state(ssk, TCP_CLOSE); 3062 mptcp_subflow_queue_clean(sk, ssk); 3063 inet_csk_listen_stop(ssk); 3064 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); 3065 release_sock(ssk); 3066 } 3067 3068 bool __mptcp_close(struct sock *sk, long timeout) 3069 { 3070 struct mptcp_subflow_context *subflow; 3071 struct mptcp_sock *msk = mptcp_sk(sk); 3072 bool do_cancel_work = false; 3073 int subflows_alive = 0; 3074 3075 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 3076 3077 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { 3078 mptcp_check_listen_stop(sk); 3079 mptcp_set_state(sk, TCP_CLOSE); 3080 goto cleanup; 3081 } 3082 3083 if (mptcp_data_avail(msk) || timeout < 0) { 3084 /* If the msk has read data, or the caller explicitly ask it, 3085 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose 3086 */ 3087 mptcp_do_fastclose(sk); 3088 timeout = 0; 3089 } else if (mptcp_close_state(sk)) { 3090 __mptcp_wr_shutdown(sk); 3091 } 3092 3093 sk_stream_wait_close(sk, timeout); 3094 3095 cleanup: 3096 /* orphan all the subflows */ 3097 mptcp_for_each_subflow(msk, subflow) { 3098 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 3099 bool slow = lock_sock_fast_nested(ssk); 3100 3101 subflows_alive += ssk->sk_state != TCP_CLOSE; 3102 3103 /* since the close timeout takes precedence on the fail one, 3104 * cancel the latter 3105 */ 3106 if (ssk == msk->first) 3107 subflow->fail_tout = 0; 3108 3109 /* detach from the parent socket, but allow data_ready to 3110 * push incoming data into the mptcp stack, to properly ack it 3111 */ 3112 ssk->sk_socket = NULL; 3113 ssk->sk_wq = NULL; 3114 unlock_sock_fast(ssk, slow); 3115 } 3116 sock_orphan(sk); 3117 3118 /* all the subflows are closed, only timeout can change the msk 3119 * state, let's not keep resources busy for no reasons 3120 */ 3121 if (subflows_alive == 0) 3122 mptcp_set_state(sk, TCP_CLOSE); 3123 3124 sock_hold(sk); 3125 pr_debug("msk=%p state=%d\n", sk, sk->sk_state); 3126 mptcp_pm_connection_closed(msk); 3127 3128 if (sk->sk_state == TCP_CLOSE) { 3129 __mptcp_destroy_sock(sk); 3130 do_cancel_work = true; 3131 } else { 3132 mptcp_start_tout_timer(sk); 3133 } 3134 3135 return do_cancel_work; 3136 } 3137 3138 static void mptcp_close(struct sock *sk, long timeout) 3139 { 3140 bool do_cancel_work; 3141 3142 lock_sock(sk); 3143 3144 do_cancel_work = __mptcp_close(sk, timeout); 3145 release_sock(sk); 3146 if (do_cancel_work) 3147 mptcp_cancel_work(sk); 3148 3149 sock_put(sk); 3150 } 3151 3152 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) 3153 { 3154 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3155 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); 3156 struct ipv6_pinfo *msk6 = inet6_sk(msk); 3157 3158 msk->sk_v6_daddr = ssk->sk_v6_daddr; 3159 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; 3160 3161 if (msk6 && ssk6) { 3162 msk6->saddr = ssk6->saddr; 3163 msk6->flow_label = ssk6->flow_label; 3164 } 3165 #endif 3166 3167 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; 3168 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; 3169 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; 3170 
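/* mirror the remaining address fields so that getsockname() and
 * getpeername() on the msk report the same tuple as the subflow
 */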
inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; 3171 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; 3172 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; 3173 } 3174 3175 static int mptcp_disconnect(struct sock *sk, int flags) 3176 { 3177 struct mptcp_sock *msk = mptcp_sk(sk); 3178 3179 /* We are on the fastopen error path. We can't call straight into the 3180 * subflows cleanup code due to lock nesting (we are already under 3181 * msk->firstsocket lock). 3182 */ 3183 if (msk->fastopening) 3184 return -EBUSY; 3185 3186 mptcp_check_listen_stop(sk); 3187 mptcp_set_state(sk, TCP_CLOSE); 3188 3189 mptcp_stop_rtx_timer(sk); 3190 mptcp_stop_tout_timer(sk); 3191 3192 mptcp_pm_connection_closed(msk); 3193 3194 /* msk->subflow is still intact, the following will not free the first 3195 * subflow 3196 */ 3197 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); 3198 3199 /* The first subflow is already in TCP_CLOSE status, the following 3200 * can't overlap with a fallback anymore 3201 */ 3202 spin_lock_bh(&msk->fallback_lock); 3203 msk->allow_subflows = true; 3204 msk->allow_infinite_fallback = true; 3205 WRITE_ONCE(msk->flags, 0); 3206 spin_unlock_bh(&msk->fallback_lock); 3207 3208 msk->cb_flags = 0; 3209 msk->recovery = false; 3210 WRITE_ONCE(msk->can_ack, false); 3211 WRITE_ONCE(msk->fully_established, false); 3212 WRITE_ONCE(msk->rcv_data_fin, false); 3213 WRITE_ONCE(msk->snd_data_fin_enable, false); 3214 WRITE_ONCE(msk->rcv_fastclose, false); 3215 WRITE_ONCE(msk->use_64bit_ack, false); 3216 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); 3217 mptcp_pm_data_reset(msk); 3218 mptcp_ca_reset(sk); 3219 msk->bytes_consumed = 0; 3220 msk->bytes_acked = 0; 3221 msk->bytes_received = 0; 3222 msk->bytes_sent = 0; 3223 msk->bytes_retrans = 0; 3224 msk->rcvspace_init = 0; 3225 3226 WRITE_ONCE(sk->sk_shutdown, 0); 3227 sk_error_report(sk); 3228 return 0; 3229 } 3230 3231 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3232 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) 3233 { 3234 struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk); 3235 3236 return &msk6->np; 3237 } 3238 3239 static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk) 3240 { 3241 const struct ipv6_pinfo *np = inet6_sk(sk); 3242 struct ipv6_txoptions *opt; 3243 struct ipv6_pinfo *newnp; 3244 3245 newnp = inet6_sk(newsk); 3246 3247 rcu_read_lock(); 3248 opt = rcu_dereference(np->opt); 3249 if (opt) { 3250 opt = ipv6_dup_options(newsk, opt); 3251 if (!opt) 3252 net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__); 3253 } 3254 RCU_INIT_POINTER(newnp->opt, opt); 3255 rcu_read_unlock(); 3256 } 3257 #endif 3258 3259 static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk) 3260 { 3261 struct ip_options_rcu *inet_opt, *newopt = NULL; 3262 const struct inet_sock *inet = inet_sk(sk); 3263 struct inet_sock *newinet; 3264 3265 newinet = inet_sk(newsk); 3266 3267 rcu_read_lock(); 3268 inet_opt = rcu_dereference(inet->inet_opt); 3269 if (inet_opt) { 3270 newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) + 3271 inet_opt->opt.optlen, GFP_ATOMIC); 3272 if (!newopt) 3273 net_warn_ratelimited("%s: Failed to copy ip options\n", __func__); 3274 } 3275 RCU_INIT_POINTER(newinet->inet_opt, newopt); 3276 rcu_read_unlock(); 3277 } 3278 3279 struct sock *mptcp_sk_clone_init(const struct sock *sk, 3280 const struct mptcp_options_received *mp_opt, 3281 struct sock *ssk, 3282 struct request_sock *req) 3283 { 3284 struct 
mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 3285 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); 3286 struct mptcp_subflow_context *subflow; 3287 struct mptcp_sock *msk; 3288 3289 if (!nsk) 3290 return NULL; 3291 3292 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3293 if (nsk->sk_family == AF_INET6) 3294 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); 3295 #endif 3296 3297 __mptcp_init_sock(nsk); 3298 3299 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3300 if (nsk->sk_family == AF_INET6) 3301 mptcp_copy_ip6_options(nsk, sk); 3302 else 3303 #endif 3304 mptcp_copy_ip_options(nsk, sk); 3305 3306 msk = mptcp_sk(nsk); 3307 WRITE_ONCE(msk->local_key, subflow_req->local_key); 3308 WRITE_ONCE(msk->token, subflow_req->token); 3309 msk->in_accept_queue = 1; 3310 WRITE_ONCE(msk->fully_established, false); 3311 if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) 3312 WRITE_ONCE(msk->csum_enabled, true); 3313 3314 WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1); 3315 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 3316 WRITE_ONCE(msk->snd_una, msk->write_seq); 3317 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); 3318 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; 3319 mptcp_init_sched(msk, mptcp_sk(sk)->sched); 3320 3321 /* passive msk is created after the first/MPC subflow */ 3322 msk->subflow_id = 2; 3323 3324 sock_reset_flag(nsk, SOCK_RCU_FREE); 3325 security_inet_csk_clone(nsk, req); 3326 3327 /* this can't race with mptcp_close(), as the msk is 3328 * not yet exposted to user-space 3329 */ 3330 mptcp_set_state(nsk, TCP_ESTABLISHED); 3331 3332 /* The msk maintain a ref to each subflow in the connections list */ 3333 WRITE_ONCE(msk->first, ssk); 3334 subflow = mptcp_subflow_ctx(ssk); 3335 list_add(&subflow->node, &msk->conn_list); 3336 sock_hold(ssk); 3337 3338 /* new mpc subflow takes ownership of the newly 3339 * created mptcp socket 3340 */ 3341 mptcp_token_accept(subflow_req, msk); 3342 3343 /* set msk addresses early to ensure mptcp_pm_get_local_id() 3344 * uses the correct data 3345 */ 3346 mptcp_copy_inaddrs(nsk, ssk); 3347 __mptcp_propagate_sndbuf(nsk, ssk); 3348 3349 mptcp_rcv_space_init(msk, ssk); 3350 3351 if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK) 3352 __mptcp_subflow_fully_established(msk, subflow, mp_opt); 3353 bh_unlock_sock(nsk); 3354 3355 /* note: the newly allocated socket refcount is 2 now */ 3356 return nsk; 3357 } 3358 3359 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) 3360 { 3361 const struct tcp_sock *tp = tcp_sk(ssk); 3362 3363 msk->rcvspace_init = 1; 3364 msk->rcvq_space.copied = 0; 3365 msk->rcvq_space.rtt_us = 0; 3366 3367 msk->rcvq_space.time = tp->tcp_mstamp; 3368 3369 /* initial rcv_space offering made to peer */ 3370 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, 3371 TCP_INIT_CWND * tp->advmss); 3372 if (msk->rcvq_space.space == 0) 3373 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; 3374 } 3375 3376 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) 3377 { 3378 struct mptcp_subflow_context *subflow, *tmp; 3379 struct sock *sk = (struct sock *)msk; 3380 3381 __mptcp_clear_xmit(sk); 3382 3383 /* join list will be eventually flushed (with rst) at sock lock release time */ 3384 mptcp_for_each_subflow_safe(msk, subflow, tmp) 3385 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); 3386 3387 __skb_queue_purge(&sk->sk_receive_queue); 3388 skb_rbtree_purge(&msk->out_of_order_queue); 3389 3390 /* move all the rx fwd alloc into the sk_mem_reclaim_final in 3391 * inet_sock_destruct() will 
dispose it 3392 */ 3393 mptcp_token_destroy(msk); 3394 mptcp_pm_destroy(msk); 3395 } 3396 3397 static void mptcp_destroy(struct sock *sk) 3398 { 3399 struct mptcp_sock *msk = mptcp_sk(sk); 3400 3401 /* allow the following to close even the initial subflow */ 3402 msk->free_first = 1; 3403 mptcp_destroy_common(msk, 0); 3404 sk_sockets_allocated_dec(sk); 3405 } 3406 3407 void __mptcp_data_acked(struct sock *sk) 3408 { 3409 if (!sock_owned_by_user(sk)) 3410 __mptcp_clean_una(sk); 3411 else 3412 __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags); 3413 } 3414 3415 void __mptcp_check_push(struct sock *sk, struct sock *ssk) 3416 { 3417 if (!mptcp_send_head(sk)) 3418 return; 3419 3420 if (!sock_owned_by_user(sk)) 3421 __mptcp_subflow_push_pending(sk, ssk, false); 3422 else 3423 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); 3424 } 3425 3426 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \ 3427 BIT(MPTCP_RETRANSMIT) | \ 3428 BIT(MPTCP_FLUSH_JOIN_LIST) | \ 3429 BIT(MPTCP_DEQUEUE)) 3430 3431 /* processes deferred events and flush wmem */ 3432 static void mptcp_release_cb(struct sock *sk) 3433 __must_hold(&sk->sk_lock.slock) 3434 { 3435 struct mptcp_sock *msk = mptcp_sk(sk); 3436 3437 for (;;) { 3438 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); 3439 struct list_head join_list; 3440 3441 if (!flags) 3442 break; 3443 3444 INIT_LIST_HEAD(&join_list); 3445 list_splice_init(&msk->join_list, &join_list); 3446 3447 /* the following actions acquire the subflow socket lock 3448 * 3449 * 1) can't be invoked in atomic scope 3450 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX 3451 * datapath acquires the msk socket spinlock while helding 3452 * the subflow socket lock 3453 */ 3454 msk->cb_flags &= ~flags; 3455 spin_unlock_bh(&sk->sk_lock.slock); 3456 3457 if (flags & BIT(MPTCP_FLUSH_JOIN_LIST)) 3458 __mptcp_flush_join_list(sk, &join_list); 3459 if (flags & BIT(MPTCP_PUSH_PENDING)) 3460 __mptcp_push_pending(sk, 0); 3461 if (flags & BIT(MPTCP_RETRANSMIT)) 3462 __mptcp_retrans(sk); 3463 if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk)) { 3464 /* notify ack seq update */ 3465 mptcp_cleanup_rbuf(msk, 0); 3466 sk->sk_data_ready(sk); 3467 } 3468 3469 cond_resched(); 3470 spin_lock_bh(&sk->sk_lock.slock); 3471 } 3472 3473 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) 3474 __mptcp_clean_una_wakeup(sk); 3475 if (unlikely(msk->cb_flags)) { 3476 /* be sure to sync the msk state before taking actions 3477 * depending on sk_state (MPTCP_ERROR_REPORT) 3478 * On sk release avoid actions depending on the first subflow 3479 */ 3480 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) 3481 __mptcp_sync_state(sk, msk->pending_state); 3482 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) 3483 __mptcp_error_report(sk); 3484 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) 3485 __mptcp_sync_sndbuf(sk); 3486 } 3487 } 3488 3489 /* MP_JOIN client subflow must wait for 4th ack before sending any data: 3490 * TCP can't schedule delack timer before the subflow is fully established. 
3491 * MPTCP uses the delack timer to do 3rd ack retransmissions 3492 */ 3493 static void schedule_3rdack_retransmission(struct sock *ssk) 3494 { 3495 struct inet_connection_sock *icsk = inet_csk(ssk); 3496 struct tcp_sock *tp = tcp_sk(ssk); 3497 unsigned long timeout; 3498 3499 if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established)) 3500 return; 3501 3502 /* reschedule with a timeout above RTT, as we must look only for drop */ 3503 if (tp->srtt_us) 3504 timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1)); 3505 else 3506 timeout = TCP_TIMEOUT_INIT; 3507 timeout += jiffies; 3508 3509 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); 3510 smp_store_release(&icsk->icsk_ack.pending, 3511 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); 3512 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); 3513 } 3514 3515 void mptcp_subflow_process_delegated(struct sock *ssk, long status) 3516 { 3517 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 3518 struct sock *sk = subflow->conn; 3519 3520 if (status & BIT(MPTCP_DELEGATE_SEND)) { 3521 mptcp_data_lock(sk); 3522 if (!sock_owned_by_user(sk)) 3523 __mptcp_subflow_push_pending(sk, ssk, true); 3524 else 3525 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); 3526 mptcp_data_unlock(sk); 3527 } 3528 if (status & BIT(MPTCP_DELEGATE_SNDBUF)) { 3529 mptcp_data_lock(sk); 3530 if (!sock_owned_by_user(sk)) 3531 __mptcp_sync_sndbuf(sk); 3532 else 3533 __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags); 3534 mptcp_data_unlock(sk); 3535 } 3536 if (status & BIT(MPTCP_DELEGATE_ACK)) 3537 schedule_3rdack_retransmission(ssk); 3538 } 3539 3540 static int mptcp_hash(struct sock *sk) 3541 { 3542 /* should never be called, 3543 * we hash the TCP subflows not the MPTCP socket 3544 */ 3545 WARN_ON_ONCE(1); 3546 return 0; 3547 } 3548 3549 static void mptcp_unhash(struct sock *sk) 3550 { 3551 /* called from sk_common_release(), but nothing to do here */ 3552 } 3553 3554 static int mptcp_get_port(struct sock *sk, unsigned short snum) 3555 { 3556 struct mptcp_sock *msk = mptcp_sk(sk); 3557 3558 pr_debug("msk=%p, ssk=%p\n", msk, msk->first); 3559 if (WARN_ON_ONCE(!msk->first)) 3560 return -EINVAL; 3561 3562 return inet_csk_get_port(msk->first, snum); 3563 } 3564 3565 void mptcp_finish_connect(struct sock *ssk) 3566 { 3567 struct mptcp_subflow_context *subflow; 3568 struct mptcp_sock *msk; 3569 struct sock *sk; 3570 3571 subflow = mptcp_subflow_ctx(ssk); 3572 sk = subflow->conn; 3573 msk = mptcp_sk(sk); 3574 3575 pr_debug("msk=%p, token=%u\n", sk, subflow->token); 3576 3577 subflow->map_seq = subflow->iasn; 3578 subflow->map_subflow_seq = 1; 3579 3580 /* the socket is not connected yet, no msk/subflow ops can access/race 3581 * accessing the field below 3582 */ 3583 WRITE_ONCE(msk->local_key, subflow->local_key); 3584 3585 mptcp_pm_new_connection(msk, ssk, 0); 3586 } 3587 3588 void mptcp_sock_graft(struct sock *sk, struct socket *parent) 3589 { 3590 write_lock_bh(&sk->sk_callback_lock); 3591 rcu_assign_pointer(sk->sk_wq, &parent->wq); 3592 sk_set_socket(sk, parent); 3593 write_unlock_bh(&sk->sk_callback_lock); 3594 } 3595 3596 bool mptcp_finish_join(struct sock *ssk) 3597 { 3598 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 3599 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 3600 struct sock *parent = (void *)msk; 3601 bool ret = true; 3602 3603 pr_debug("msk=%p, subflow=%p\n", msk, subflow); 3604 3605 /* mptcp socket already closing? 
*/ 3606 if (!mptcp_is_fully_established(parent)) { 3607 subflow->reset_reason = MPTCP_RST_EMPTCP; 3608 return false; 3609 } 3610 3611 /* active subflow, already present inside the conn_list */ 3612 if (!list_empty(&subflow->node)) { 3613 spin_lock_bh(&msk->fallback_lock); 3614 if (!msk->allow_subflows) { 3615 spin_unlock_bh(&msk->fallback_lock); 3616 return false; 3617 } 3618 mptcp_subflow_joined(msk, ssk); 3619 spin_unlock_bh(&msk->fallback_lock); 3620 mptcp_propagate_sndbuf(parent, ssk); 3621 return true; 3622 } 3623 3624 if (!mptcp_pm_allow_new_subflow(msk)) { 3625 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED); 3626 goto err_prohibited; 3627 } 3628 3629 /* If we can't acquire msk socket lock here, let the release callback 3630 * handle it 3631 */ 3632 mptcp_data_lock(parent); 3633 if (!sock_owned_by_user(parent)) { 3634 ret = __mptcp_finish_join(msk, ssk); 3635 if (ret) { 3636 sock_hold(ssk); 3637 list_add_tail(&subflow->node, &msk->conn_list); 3638 } 3639 } else { 3640 sock_hold(ssk); 3641 list_add_tail(&subflow->node, &msk->join_list); 3642 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); 3643 } 3644 mptcp_data_unlock(parent); 3645 3646 if (!ret) { 3647 err_prohibited: 3648 subflow->reset_reason = MPTCP_RST_EPROHIBIT; 3649 return false; 3650 } 3651 3652 return true; 3653 } 3654 3655 static void mptcp_shutdown(struct sock *sk, int how) 3656 { 3657 pr_debug("sk=%p, how=%d\n", sk, how); 3658 3659 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) 3660 __mptcp_wr_shutdown(sk); 3661 } 3662 3663 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) 3664 { 3665 const struct sock *sk = (void *)msk; 3666 u64 delta; 3667 3668 if (sk->sk_state == TCP_LISTEN) 3669 return -EINVAL; 3670 3671 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 3672 return 0; 3673 3674 delta = msk->write_seq - v; 3675 if (__mptcp_check_fallback(msk) && msk->first) { 3676 struct tcp_sock *tp = tcp_sk(msk->first); 3677 3678 /* the first subflow is disconnected after close - see 3679 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq 3680 * so ignore that status, too. 
3681 */ 3682 if (!((1 << msk->first->sk_state) & 3683 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))) 3684 delta += READ_ONCE(tp->write_seq) - tp->snd_una; 3685 } 3686 if (delta > INT_MAX) 3687 delta = INT_MAX; 3688 3689 return (int)delta; 3690 } 3691 3692 static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) 3693 { 3694 struct mptcp_sock *msk = mptcp_sk(sk); 3695 bool slow; 3696 3697 switch (cmd) { 3698 case SIOCINQ: 3699 if (sk->sk_state == TCP_LISTEN) 3700 return -EINVAL; 3701 3702 lock_sock(sk); 3703 if (__mptcp_move_skbs(sk)) 3704 mptcp_cleanup_rbuf(msk, 0); 3705 *karg = mptcp_inq_hint(sk); 3706 release_sock(sk); 3707 break; 3708 case SIOCOUTQ: 3709 slow = lock_sock_fast(sk); 3710 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); 3711 unlock_sock_fast(sk, slow); 3712 break; 3713 case SIOCOUTQNSD: 3714 slow = lock_sock_fast(sk); 3715 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); 3716 unlock_sock_fast(sk, slow); 3717 break; 3718 default: 3719 return -ENOIOCTLCMD; 3720 } 3721 3722 return 0; 3723 } 3724 3725 static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 3726 { 3727 struct mptcp_subflow_context *subflow; 3728 struct mptcp_sock *msk = mptcp_sk(sk); 3729 int err = -EINVAL; 3730 struct sock *ssk; 3731 3732 ssk = __mptcp_nmpc_sk(msk); 3733 if (IS_ERR(ssk)) 3734 return PTR_ERR(ssk); 3735 3736 mptcp_set_state(sk, TCP_SYN_SENT); 3737 subflow = mptcp_subflow_ctx(ssk); 3738 #ifdef CONFIG_TCP_MD5SIG 3739 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of 3740 * TCP option space. 3741 */ 3742 if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info)) 3743 mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK); 3744 #endif 3745 if (subflow->request_mptcp) { 3746 if (mptcp_active_should_disable(sk)) 3747 mptcp_early_fallback(msk, subflow, 3748 MPTCP_MIB_MPCAPABLEACTIVEDISABLED); 3749 else if (mptcp_token_new_connect(ssk) < 0) 3750 mptcp_early_fallback(msk, subflow, 3751 MPTCP_MIB_TOKENFALLBACKINIT); 3752 } 3753 3754 WRITE_ONCE(msk->write_seq, subflow->idsn); 3755 WRITE_ONCE(msk->snd_nxt, subflow->idsn); 3756 WRITE_ONCE(msk->snd_una, subflow->idsn); 3757 if (likely(!__mptcp_check_fallback(msk))) 3758 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE); 3759 3760 /* if reaching here via the fastopen/sendmsg path, the caller already 3761 * acquired the subflow socket lock, too. 
3762 */ 3763 if (!msk->fastopening) 3764 lock_sock(ssk); 3765 3766 /* the following mirrors closely a very small chunk of code from 3767 * __inet_stream_connect() 3768 */ 3769 if (ssk->sk_state != TCP_CLOSE) 3770 goto out; 3771 3772 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) { 3773 err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len); 3774 if (err) 3775 goto out; 3776 } 3777 3778 err = ssk->sk_prot->connect(ssk, uaddr, addr_len); 3779 if (err < 0) 3780 goto out; 3781 3782 inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk)); 3783 3784 out: 3785 if (!msk->fastopening) 3786 release_sock(ssk); 3787 3788 /* on successful connect, the msk state will be moved to established by 3789 * subflow_finish_connect() 3790 */ 3791 if (unlikely(err)) { 3792 /* avoid leaving a dangling token in an unconnected socket */ 3793 mptcp_token_destroy(msk); 3794 mptcp_set_state(sk, TCP_CLOSE); 3795 return err; 3796 } 3797 3798 mptcp_copy_inaddrs(sk, ssk); 3799 return 0; 3800 } 3801 3802 static struct proto mptcp_prot = { 3803 .name = "MPTCP", 3804 .owner = THIS_MODULE, 3805 .init = mptcp_init_sock, 3806 .connect = mptcp_connect, 3807 .disconnect = mptcp_disconnect, 3808 .close = mptcp_close, 3809 .setsockopt = mptcp_setsockopt, 3810 .getsockopt = mptcp_getsockopt, 3811 .shutdown = mptcp_shutdown, 3812 .destroy = mptcp_destroy, 3813 .sendmsg = mptcp_sendmsg, 3814 .ioctl = mptcp_ioctl, 3815 .recvmsg = mptcp_recvmsg, 3816 .release_cb = mptcp_release_cb, 3817 .hash = mptcp_hash, 3818 .unhash = mptcp_unhash, 3819 .get_port = mptcp_get_port, 3820 .stream_memory_free = mptcp_stream_memory_free, 3821 .sockets_allocated = &mptcp_sockets_allocated, 3822 3823 .memory_allocated = &net_aligned_data.tcp_memory_allocated, 3824 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, 3825 3826 .memory_pressure = &tcp_memory_pressure, 3827 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), 3828 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), 3829 .sysctl_mem = sysctl_tcp_mem, 3830 .obj_size = sizeof(struct mptcp_sock), 3831 .slab_flags = SLAB_TYPESAFE_BY_RCU, 3832 .no_autobind = true, 3833 }; 3834 3835 static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3836 { 3837 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3838 struct sock *ssk, *sk = sock->sk; 3839 int err = -EINVAL; 3840 3841 lock_sock(sk); 3842 ssk = __mptcp_nmpc_sk(msk); 3843 if (IS_ERR(ssk)) { 3844 err = PTR_ERR(ssk); 3845 goto unlock; 3846 } 3847 3848 if (sk->sk_family == AF_INET) 3849 err = inet_bind_sk(ssk, uaddr, addr_len); 3850 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3851 else if (sk->sk_family == AF_INET6) 3852 err = inet6_bind_sk(ssk, uaddr, addr_len); 3853 #endif 3854 if (!err) 3855 mptcp_copy_inaddrs(sk, ssk); 3856 3857 unlock: 3858 release_sock(sk); 3859 return err; 3860 } 3861 3862 static int mptcp_listen(struct socket *sock, int backlog) 3863 { 3864 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3865 struct sock *sk = sock->sk; 3866 struct sock *ssk; 3867 int err; 3868 3869 pr_debug("msk=%p\n", msk); 3870 3871 lock_sock(sk); 3872 3873 err = -EINVAL; 3874 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) 3875 goto unlock; 3876 3877 ssk = __mptcp_nmpc_sk(msk); 3878 if (IS_ERR(ssk)) { 3879 err = PTR_ERR(ssk); 3880 goto unlock; 3881 } 3882 3883 mptcp_set_state(sk, TCP_LISTEN); 3884 sock_set_flag(sk, SOCK_RCU_FREE); 3885 3886 lock_sock(ssk); 3887 err = __inet_listen_sk(ssk, backlog); 3888 release_sock(ssk); 3889 mptcp_set_state(sk, inet_sk_state_load(ssk)); 3890 3891 if (!err) { 3892 
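/* the TCP-level listener lives on the first subflow: account the
 * MPTCP socket here and mirror the address the subflow bound to
 */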
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3893 mptcp_copy_inaddrs(sk, ssk); 3894 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); 3895 } 3896 3897 unlock: 3898 release_sock(sk); 3899 return err; 3900 } 3901 3902 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, 3903 struct proto_accept_arg *arg) 3904 { 3905 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3906 struct sock *ssk, *newsk; 3907 3908 pr_debug("msk=%p\n", msk); 3909 3910 /* Buggy applications can call accept on socket states other then LISTEN 3911 * but no need to allocate the first subflow just to error out. 3912 */ 3913 ssk = READ_ONCE(msk->first); 3914 if (!ssk) 3915 return -EINVAL; 3916 3917 pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk)); 3918 newsk = inet_csk_accept(ssk, arg); 3919 if (!newsk) 3920 return arg->err; 3921 3922 pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk)); 3923 if (sk_is_mptcp(newsk)) { 3924 struct mptcp_subflow_context *subflow; 3925 struct sock *new_mptcp_sock; 3926 3927 subflow = mptcp_subflow_ctx(newsk); 3928 new_mptcp_sock = subflow->conn; 3929 3930 /* is_mptcp should be false if subflow->conn is missing, see 3931 * subflow_syn_recv_sock() 3932 */ 3933 if (WARN_ON_ONCE(!new_mptcp_sock)) { 3934 tcp_sk(newsk)->is_mptcp = 0; 3935 goto tcpfallback; 3936 } 3937 3938 newsk = new_mptcp_sock; 3939 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK); 3940 3941 newsk->sk_kern_sock = arg->kern; 3942 lock_sock(newsk); 3943 __inet_accept(sock, newsock, newsk); 3944 3945 set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags); 3946 msk = mptcp_sk(newsk); 3947 msk->in_accept_queue = 0; 3948 3949 /* set ssk->sk_socket of accept()ed flows to mptcp socket. 3950 * This is needed so NOSPACE flag can be set from tcp stack. 3951 */ 3952 mptcp_for_each_subflow(msk, subflow) { 3953 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 3954 3955 if (!ssk->sk_socket) 3956 mptcp_sock_graft(ssk, newsock); 3957 } 3958 3959 mptcp_rps_record_subflows(msk); 3960 3961 /* Do late cleanup for the first subflow as necessary. Also 3962 * deal with bad peers not doing a complete shutdown. 3963 */ 3964 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { 3965 __mptcp_close_ssk(newsk, msk->first, 3966 mptcp_subflow_ctx(msk->first), 0); 3967 if (unlikely(list_is_singular(&msk->conn_list))) 3968 mptcp_set_state(newsk, TCP_CLOSE); 3969 } 3970 } else { 3971 tcpfallback: 3972 newsk->sk_kern_sock = arg->kern; 3973 lock_sock(newsk); 3974 __inet_accept(sock, newsock, newsk); 3975 /* we are being invoked after accepting a non-mp-capable 3976 * flow: sk is a tcp_sk, not an mptcp one. 3977 * 3978 * Hand the socket over to tcp so all further socket ops 3979 * bypass mptcp. 
		 */
		WRITE_ONCE(newsock->sk->sk_socket->ops,
			   mptcp_fallback_tcp_ops(newsock->sk));
	}
	release_sock(newsk);

	return 0;
}

static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	if (__mptcp_stream_is_writeable(sk, 1))
		return EPOLLOUT | EPOLLWRNORM;

	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
	if (__mptcp_stream_is_writeable(sk, 1))
		return EPOLLOUT | EPOLLWRNORM;

	return 0;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
	if (state == TCP_LISTEN) {
		struct sock *ssk = READ_ONCE(msk->first);

		if (WARN_ON_ONCE(!ssk))
			return 0;

		return inet_csk_listen_poll(ssk);
	}

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(sk);
		if (shutdown & SEND_SHUTDOWN)
			mask |= EPOLLOUT | EPOLLWRNORM;
		else
			mask |= mptcp_check_writeable(msk);
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* cf tcp_poll() note about TFO */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}

	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;

	return mask;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.release	= inet_release,
	.bind		= mptcp_bind,
	.connect	= inet_stream_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= mptcp_stream_accept,
	.getname	= inet_getname,
	.poll		= mptcp_poll,
	.ioctl		= inet_ioctl,
	.gettstamp	= sock_gettstamp,
	.listen		= mptcp_listen,
	.shutdown	= inet_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= inet_sendmsg,
	.recvmsg	= inet_recvmsg,
	.mmap		= sock_no_mmap,
	.set_rcvlowat	= mptcp_set_rcvlowat,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk)) {
			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
		} else {
			/* tcp_release_cb_override already processed
			 * the action, or will do so at the next
			 * release_sock(). In both cases we must dequeue the
			 * subflow here, on the same CPU that scheduled it.
			 */
			smp_wmb();
			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
		}
		bh_unlock_sock(ssk);
		sock_put(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}

void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_napi_dev = alloc_netdev_dummy(0);
	if (!mptcp_napi_dev)
		panic("Failed to allocate MPTCP dummy netdev\n");
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_napi_add_tx(mptcp_napi_dev, &delegated->napi,
				  mptcp_napi_poll);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_sched_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		= PF_INET6,
	.owner		= THIS_MODULE,
	.release	= inet6_release,
	.bind		= mptcp_bind,
	.connect	= inet_stream_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= mptcp_stream_accept,
	.getname	= inet6_getname,
	.poll		= mptcp_poll,
	.ioctl		= inet6_ioctl,
	.gettstamp	= sock_gettstamp,
	.listen		= mptcp_listen,
	.shutdown	= inet_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= inet6_sendmsg,
	.recvmsg	= inet6_recvmsg,
	.mmap		= sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= inet6_compat_ioctl,
#endif
	.set_rcvlowat	= mptcp_set_rcvlowat,
};

static struct proto mptcp_v6_prot;

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif
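
/* Usage sketch (illustrative only, not part of this file and not built with
 * the kernel): once mptcp_protosw / mptcp_v6_protosw are registered above,
 * an application opts into MPTCP simply by passing IPPROTO_MPTCP to
 * socket(); connect() then reaches mptcp_connect() through
 * inet_stream_connect(). The function name, address and port below are
 * made-up placeholders:
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static int mptcp_client_example(void)
 *	{
 *		struct sockaddr_in dst = {
 *			.sin_family = AF_INET,
 *			.sin_port = htons(8080),
 *		};
 *		int fd;
 *
 *		if (inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr) != 1)
 *			return -1;
 *
 *		fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *		if (fd < 0)
 *			return -1;	// e.g. kernel without MPTCP support
 *
 *		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		close(fd);
 *		return 0;
 *	}
 */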