1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2017 - 2019, Intel Corporation. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/sched/signal.h> 13 #include <linux/atomic.h> 14 #include <net/sock.h> 15 #include <net/inet_common.h> 16 #include <net/inet_hashtables.h> 17 #include <net/protocol.h> 18 #include <net/tcp_states.h> 19 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 20 #include <net/transp_v6.h> 21 #endif 22 #include <net/mptcp.h> 23 #include <net/hotdata.h> 24 #include <net/xfrm.h> 25 #include <asm/ioctls.h> 26 #include "protocol.h" 27 #include "mib.h" 28 29 #define CREATE_TRACE_POINTS 30 #include <trace/events/mptcp.h> 31 32 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 33 struct mptcp6_sock { 34 struct mptcp_sock msk; 35 struct ipv6_pinfo np; 36 }; 37 #endif 38 39 enum { 40 MPTCP_CMSG_TS = BIT(0), 41 MPTCP_CMSG_INQ = BIT(1), 42 }; 43 44 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp; 45 46 static void __mptcp_destroy_sock(struct sock *sk); 47 static void mptcp_check_send_data_fin(struct sock *sk); 48 49 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions); 50 static struct net_device mptcp_napi_dev; 51 52 /* Returns end sequence number of the receiver's advertised window */ 53 static u64 mptcp_wnd_end(const struct mptcp_sock *msk) 54 { 55 return READ_ONCE(msk->wnd_end); 56 } 57 58 static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk) 59 { 60 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 61 if (sk->sk_prot == &tcpv6_prot) 62 return &inet6_stream_ops; 63 #endif 64 WARN_ON_ONCE(sk->sk_prot != &tcp_prot); 65 return &inet_stream_ops; 66 } 67 68 static int __mptcp_socket_create(struct mptcp_sock *msk) 69 { 70 struct mptcp_subflow_context *subflow; 71 struct sock *sk = (struct sock *)msk; 72 struct socket *ssock; 73 int err; 74 75 err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock); 76 if (err) 77 return err; 78 79 msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio; 80 WRITE_ONCE(msk->first, ssock->sk); 81 subflow = mptcp_subflow_ctx(ssock->sk); 82 list_add(&subflow->node, &msk->conn_list); 83 sock_hold(ssock->sk); 84 subflow->request_mptcp = 1; 85 subflow->subflow_id = msk->subflow_id++; 86 87 /* This is the first subflow, always with id 0 */ 88 WRITE_ONCE(subflow->local_id, 0); 89 mptcp_sock_graft(msk->first, sk->sk_socket); 90 iput(SOCK_INODE(ssock)); 91 92 return 0; 93 } 94 95 /* If the MPC handshake is not started, returns the first subflow, 96 * eventually allocating it. 97 */ 98 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk) 99 { 100 struct sock *sk = (struct sock *)msk; 101 int ret; 102 103 if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 104 return ERR_PTR(-EINVAL); 105 106 if (!msk->first) { 107 ret = __mptcp_socket_create(msk); 108 if (ret) 109 return ERR_PTR(ret); 110 } 111 112 return msk->first; 113 } 114 115 static void mptcp_drop(struct sock *sk, struct sk_buff *skb) 116 { 117 sk_drops_add(sk, skb); 118 __kfree_skb(skb); 119 } 120 121 static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size) 122 { 123 WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc, 124 mptcp_sk(sk)->rmem_fwd_alloc + size); 125 } 126 127 static void mptcp_rmem_charge(struct sock *sk, int size) 128 { 129 mptcp_rmem_fwd_alloc_add(sk, -size); 130 } 131 132 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, 133 struct sk_buff *from) 134 { 135 bool fragstolen; 136 int delta; 137 138 if (MPTCP_SKB_CB(from)->offset || 139 ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) || 140 !skb_try_coalesce(to, from, &fragstolen, &delta)) 141 return false; 142 143 pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n", 144 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, 145 to->len, MPTCP_SKB_CB(from)->end_seq); 146 MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; 147 148 /* note the fwd memory can reach a negative value after accounting 149 * for the delta, but the later skb free will restore a non 150 * negative one 151 */ 152 atomic_add(delta, &sk->sk_rmem_alloc); 153 mptcp_rmem_charge(sk, delta); 154 kfree_skb_partial(from, fragstolen); 155 156 return true; 157 } 158 159 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, 160 struct sk_buff *from) 161 { 162 if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq) 163 return false; 164 165 return mptcp_try_coalesce((struct sock *)msk, to, from); 166 } 167 168 static void __mptcp_rmem_reclaim(struct sock *sk, int amount) 169 { 170 amount >>= PAGE_SHIFT; 171 mptcp_rmem_charge(sk, amount << PAGE_SHIFT); 172 __sk_mem_reduce_allocated(sk, amount); 173 } 174 175 static void mptcp_rmem_uncharge(struct sock *sk, int size) 176 { 177 struct mptcp_sock *msk = mptcp_sk(sk); 178 int reclaimable; 179 180 mptcp_rmem_fwd_alloc_add(sk, size); 181 reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); 182 183 /* see sk_mem_uncharge() for the rationale behind the following schema */ 184 if (unlikely(reclaimable >= PAGE_SIZE)) 185 __mptcp_rmem_reclaim(sk, reclaimable); 186 } 187 188 static void mptcp_rfree(struct sk_buff *skb) 189 { 190 unsigned int len = skb->truesize; 191 struct sock *sk = skb->sk; 192 193 atomic_sub(len, &sk->sk_rmem_alloc); 194 mptcp_rmem_uncharge(sk, len); 195 } 196 197 void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk) 198 { 199 skb_orphan(skb); 200 skb->sk = sk; 201 skb->destructor = mptcp_rfree; 202 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 203 mptcp_rmem_charge(sk, skb->truesize); 204 } 205 206 /* "inspired" by tcp_data_queue_ofo(), main differences: 207 * - use mptcp seqs 208 * - don't cope with sacks 209 */ 210 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) 211 { 212 struct sock *sk = (struct sock *)msk; 213 struct rb_node **p, *parent; 214 u64 seq, end_seq, max_seq; 215 struct sk_buff *skb1; 216 217 seq = MPTCP_SKB_CB(skb)->map_seq; 218 end_seq = MPTCP_SKB_CB(skb)->end_seq; 219 max_seq = atomic64_read(&msk->rcv_wnd_sent); 220 221 pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, 222 RB_EMPTY_ROOT(&msk->out_of_order_queue)); 223 if (after64(end_seq, max_seq)) { 224 /* out of window */ 225 mptcp_drop(sk, skb); 226 pr_debug("oow by %lld, rcv_wnd_sent %llu\n", 227 (unsigned long long)end_seq - (unsigned long)max_seq, 228 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent)); 229 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW); 230 return; 231 } 232 233 p = &msk->out_of_order_queue.rb_node; 234 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE); 235 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { 236 rb_link_node(&skb->rbnode, NULL, p); 237 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); 238 msk->ooo_last_skb = skb; 239 goto end; 240 } 241 242 /* with 2 subflows, adding at end of ooo queue is quite likely 243 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 244 */ 245 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { 246 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); 247 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); 248 return; 249 } 250 251 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ 252 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { 253 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); 254 parent = &msk->ooo_last_skb->rbnode; 255 p = &parent->rb_right; 256 goto insert; 257 } 258 259 /* Find place to insert this segment. Handle overlaps on the way. */ 260 parent = NULL; 261 while (*p) { 262 parent = *p; 263 skb1 = rb_to_skb(parent); 264 if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { 265 p = &parent->rb_left; 266 continue; 267 } 268 if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { 269 if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) { 270 /* All the bits are present. Drop. */ 271 mptcp_drop(sk, skb); 272 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 273 return; 274 } 275 if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { 276 /* partial overlap: 277 * | skb | 278 * | skb1 | 279 * continue traversing 280 */ 281 } else { 282 /* skb's seq == skb1's seq and skb covers skb1. 283 * Replace skb1 with skb. 284 */ 285 rb_replace_node(&skb1->rbnode, &skb->rbnode, 286 &msk->out_of_order_queue); 287 mptcp_drop(sk, skb1); 288 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 289 goto merge_right; 290 } 291 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { 292 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); 293 return; 294 } 295 p = &parent->rb_right; 296 } 297 298 insert: 299 /* Insert segment into RB tree. */ 300 rb_link_node(&skb->rbnode, parent, p); 301 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); 302 303 merge_right: 304 /* Remove other segments covered by skb. */ 305 while ((skb1 = skb_rb_next(skb)) != NULL) { 306 if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) 307 break; 308 rb_erase(&skb1->rbnode, &msk->out_of_order_queue); 309 mptcp_drop(sk, skb1); 310 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 311 } 312 /* If there is no skb after us, we are the last_skb ! */ 313 if (!skb1) 314 msk->ooo_last_skb = skb; 315 316 end: 317 skb_condense(skb); 318 mptcp_set_owner_r(skb, sk); 319 } 320 321 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size) 322 { 323 struct mptcp_sock *msk = mptcp_sk(sk); 324 int amt, amount; 325 326 if (size <= msk->rmem_fwd_alloc) 327 return true; 328 329 size -= msk->rmem_fwd_alloc; 330 amt = sk_mem_pages(size); 331 amount = amt << PAGE_SHIFT; 332 if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) 333 return false; 334 335 mptcp_rmem_fwd_alloc_add(sk, amount); 336 return true; 337 } 338 339 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, 340 struct sk_buff *skb, unsigned int offset, 341 size_t copy_len) 342 { 343 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 344 struct sock *sk = (struct sock *)msk; 345 struct sk_buff *tail; 346 bool has_rxtstamp; 347 348 __skb_unlink(skb, &ssk->sk_receive_queue); 349 350 skb_ext_reset(skb); 351 skb_orphan(skb); 352 353 /* try to fetch required memory from subflow */ 354 if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) { 355 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); 356 goto drop; 357 } 358 359 has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; 360 361 /* the skb map_seq accounts for the skb offset: 362 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq 363 * value 364 */ 365 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); 366 MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; 367 MPTCP_SKB_CB(skb)->offset = offset; 368 MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp; 369 370 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { 371 /* in sequence */ 372 msk->bytes_received += copy_len; 373 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); 374 tail = skb_peek_tail(&sk->sk_receive_queue); 375 if (tail && mptcp_try_coalesce(sk, tail, skb)) 376 return true; 377 378 mptcp_set_owner_r(skb, sk); 379 __skb_queue_tail(&sk->sk_receive_queue, skb); 380 return true; 381 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { 382 mptcp_data_queue_ofo(msk, skb); 383 return false; 384 } 385 386 /* old data, keep it simple and drop the whole pkt, sender 387 * will retransmit as needed, if needed. 388 */ 389 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 390 drop: 391 mptcp_drop(sk, skb); 392 return false; 393 } 394 395 static void mptcp_stop_rtx_timer(struct sock *sk) 396 { 397 struct inet_connection_sock *icsk = inet_csk(sk); 398 399 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); 400 mptcp_sk(sk)->timer_ival = 0; 401 } 402 403 static void mptcp_close_wake_up(struct sock *sk) 404 { 405 if (sock_flag(sk, SOCK_DEAD)) 406 return; 407 408 sk->sk_state_change(sk); 409 if (sk->sk_shutdown == SHUTDOWN_MASK || 410 sk->sk_state == TCP_CLOSE) 411 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 412 else 413 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 414 } 415 416 /* called under the msk socket lock */ 417 static bool mptcp_pending_data_fin_ack(struct sock *sk) 418 { 419 struct mptcp_sock *msk = mptcp_sk(sk); 420 421 return ((1 << sk->sk_state) & 422 (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && 423 msk->write_seq == READ_ONCE(msk->snd_una); 424 } 425 426 static void mptcp_check_data_fin_ack(struct sock *sk) 427 { 428 struct mptcp_sock *msk = mptcp_sk(sk); 429 430 /* Look for an acknowledged DATA_FIN */ 431 if (mptcp_pending_data_fin_ack(sk)) { 432 WRITE_ONCE(msk->snd_data_fin_enable, 0); 433 434 switch (sk->sk_state) { 435 case TCP_FIN_WAIT1: 436 mptcp_set_state(sk, TCP_FIN_WAIT2); 437 break; 438 case TCP_CLOSING: 439 case TCP_LAST_ACK: 440 mptcp_set_state(sk, TCP_CLOSE); 441 break; 442 } 443 444 mptcp_close_wake_up(sk); 445 } 446 } 447 448 /* can be called with no lock acquired */ 449 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) 450 { 451 struct mptcp_sock *msk = mptcp_sk(sk); 452 453 if (READ_ONCE(msk->rcv_data_fin) && 454 ((1 << inet_sk_state_load(sk)) & 455 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { 456 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); 457 458 if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) { 459 if (seq) 460 *seq = rcv_data_fin_seq; 461 462 return true; 463 } 464 } 465 466 return false; 467 } 468 469 static void mptcp_set_datafin_timeout(struct sock *sk) 470 { 471 struct inet_connection_sock *icsk = inet_csk(sk); 472 u32 retransmits; 473 474 retransmits = min_t(u32, icsk->icsk_retransmits, 475 ilog2(TCP_RTO_MAX / TCP_RTO_MIN)); 476 477 mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits; 478 } 479 480 static void __mptcp_set_timeout(struct sock *sk, long tout) 481 { 482 mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; 483 } 484 485 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) 486 { 487 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 488 489 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? 490 inet_csk(ssk)->icsk_timeout - jiffies : 0; 491 } 492 493 static void mptcp_set_timeout(struct sock *sk) 494 { 495 struct mptcp_subflow_context *subflow; 496 long tout = 0; 497 498 mptcp_for_each_subflow(mptcp_sk(sk), subflow) 499 tout = max(tout, mptcp_timeout_from_subflow(subflow)); 500 __mptcp_set_timeout(sk, tout); 501 } 502 503 static inline bool tcp_can_send_ack(const struct sock *ssk) 504 { 505 return !((1 << inet_sk_state_load(ssk)) & 506 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN)); 507 } 508 509 void __mptcp_subflow_send_ack(struct sock *ssk) 510 { 511 if (tcp_can_send_ack(ssk)) 512 tcp_send_ack(ssk); 513 } 514 515 static void mptcp_subflow_send_ack(struct sock *ssk) 516 { 517 bool slow; 518 519 slow = lock_sock_fast(ssk); 520 __mptcp_subflow_send_ack(ssk); 521 unlock_sock_fast(ssk, slow); 522 } 523 524 static void mptcp_send_ack(struct mptcp_sock *msk) 525 { 526 struct mptcp_subflow_context *subflow; 527 528 mptcp_for_each_subflow(msk, subflow) 529 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); 530 } 531 532 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied) 533 { 534 bool slow; 535 536 slow = lock_sock_fast(ssk); 537 if (tcp_can_send_ack(ssk)) 538 tcp_cleanup_rbuf(ssk, copied); 539 unlock_sock_fast(ssk, slow); 540 } 541 542 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) 543 { 544 const struct inet_connection_sock *icsk = inet_csk(ssk); 545 u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending); 546 const struct tcp_sock *tp = tcp_sk(ssk); 547 548 return (ack_pending & ICSK_ACK_SCHED) && 549 ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) > 550 READ_ONCE(icsk->icsk_ack.rcv_mss)) || 551 (rx_empty && ack_pending & 552 (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED))); 553 } 554 555 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) 556 { 557 int old_space = READ_ONCE(msk->old_wspace); 558 struct mptcp_subflow_context *subflow; 559 struct sock *sk = (struct sock *)msk; 560 int space = __mptcp_space(sk); 561 bool cleanup, rx_empty; 562 563 cleanup = (space > 0) && (space >= (old_space << 1)) && copied; 564 rx_empty = !__mptcp_rmem(sk) && copied; 565 566 mptcp_for_each_subflow(msk, subflow) { 567 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 568 569 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) 570 mptcp_subflow_cleanup_rbuf(ssk, copied); 571 } 572 } 573 574 static bool mptcp_check_data_fin(struct sock *sk) 575 { 576 struct mptcp_sock *msk = mptcp_sk(sk); 577 u64 rcv_data_fin_seq; 578 bool ret = false; 579 580 /* Need to ack a DATA_FIN received from a peer while this side 581 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. 582 * msk->rcv_data_fin was set when parsing the incoming options 583 * at the subflow level and the msk lock was not held, so this 584 * is the first opportunity to act on the DATA_FIN and change 585 * the msk state. 586 * 587 * If we are caught up to the sequence number of the incoming 588 * DATA_FIN, send the DATA_ACK now and do state transition. If 589 * not caught up, do nothing and let the recv code send DATA_ACK 590 * when catching up. 591 */ 592 593 if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { 594 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); 595 WRITE_ONCE(msk->rcv_data_fin, 0); 596 597 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); 598 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 599 600 switch (sk->sk_state) { 601 case TCP_ESTABLISHED: 602 mptcp_set_state(sk, TCP_CLOSE_WAIT); 603 break; 604 case TCP_FIN_WAIT1: 605 mptcp_set_state(sk, TCP_CLOSING); 606 break; 607 case TCP_FIN_WAIT2: 608 mptcp_set_state(sk, TCP_CLOSE); 609 break; 610 default: 611 /* Other states not expected */ 612 WARN_ON_ONCE(1); 613 break; 614 } 615 616 ret = true; 617 if (!__mptcp_check_fallback(msk)) 618 mptcp_send_ack(msk); 619 mptcp_close_wake_up(sk); 620 } 621 return ret; 622 } 623 624 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) 625 { 626 if (READ_ONCE(msk->allow_infinite_fallback)) { 627 MPTCP_INC_STATS(sock_net(ssk), 628 MPTCP_MIB_DSSCORRUPTIONFALLBACK); 629 mptcp_do_fallback(ssk); 630 } else { 631 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET); 632 mptcp_subflow_reset(ssk); 633 } 634 } 635 636 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, 637 struct sock *ssk, 638 unsigned int *bytes) 639 { 640 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 641 struct sock *sk = (struct sock *)msk; 642 unsigned int moved = 0; 643 bool more_data_avail; 644 struct tcp_sock *tp; 645 bool done = false; 646 int sk_rbuf; 647 648 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); 649 650 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 651 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); 652 653 if (unlikely(ssk_rbuf > sk_rbuf)) { 654 WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); 655 sk_rbuf = ssk_rbuf; 656 } 657 } 658 659 pr_debug("msk=%p ssk=%p\n", msk, ssk); 660 tp = tcp_sk(ssk); 661 do { 662 u32 map_remaining, offset; 663 u32 seq = tp->copied_seq; 664 struct sk_buff *skb; 665 bool fin; 666 667 /* try to move as much data as available */ 668 map_remaining = subflow->map_data_len - 669 mptcp_subflow_get_map_offset(subflow); 670 671 skb = skb_peek(&ssk->sk_receive_queue); 672 if (!skb) { 673 /* With racing move_skbs_to_msk() and __mptcp_move_skbs(), 674 * a different CPU can have already processed the pending 675 * data, stop here or we can enter an infinite loop 676 */ 677 if (!moved) 678 done = true; 679 break; 680 } 681 682 if (__mptcp_check_fallback(msk)) { 683 /* Under fallback skbs have no MPTCP extension and TCP could 684 * collapse them between the dummy map creation and the 685 * current dequeue. Be sure to adjust the map size. 686 */ 687 map_remaining = skb->len; 688 subflow->map_data_len = skb->len; 689 } 690 691 offset = seq - TCP_SKB_CB(skb)->seq; 692 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 693 if (fin) { 694 done = true; 695 seq++; 696 } 697 698 if (offset < skb->len) { 699 size_t len = skb->len - offset; 700 701 if (tp->urg_data) 702 done = true; 703 704 if (__mptcp_move_skb(msk, ssk, skb, offset, len)) 705 moved += len; 706 seq += len; 707 708 if (unlikely(map_remaining < len)) { 709 DEBUG_NET_WARN_ON_ONCE(1); 710 mptcp_dss_corruption(msk, ssk); 711 } 712 } else { 713 if (unlikely(!fin)) { 714 DEBUG_NET_WARN_ON_ONCE(1); 715 mptcp_dss_corruption(msk, ssk); 716 } 717 718 sk_eat_skb(ssk, skb); 719 done = true; 720 } 721 722 WRITE_ONCE(tp->copied_seq, seq); 723 more_data_avail = mptcp_subflow_data_available(ssk); 724 725 if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) { 726 done = true; 727 break; 728 } 729 } while (more_data_avail); 730 731 if (moved > 0) 732 msk->last_data_recv = tcp_jiffies32; 733 *bytes += moved; 734 return done; 735 } 736 737 static bool __mptcp_ofo_queue(struct mptcp_sock *msk) 738 { 739 struct sock *sk = (struct sock *)msk; 740 struct sk_buff *skb, *tail; 741 bool moved = false; 742 struct rb_node *p; 743 u64 end_seq; 744 745 p = rb_first(&msk->out_of_order_queue); 746 pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); 747 while (p) { 748 skb = rb_to_skb(p); 749 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) 750 break; 751 752 p = rb_next(p); 753 rb_erase(&skb->rbnode, &msk->out_of_order_queue); 754 755 if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq, 756 msk->ack_seq))) { 757 mptcp_drop(sk, skb); 758 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); 759 continue; 760 } 761 762 end_seq = MPTCP_SKB_CB(skb)->end_seq; 763 tail = skb_peek_tail(&sk->sk_receive_queue); 764 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { 765 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; 766 767 /* skip overlapping data, if any */ 768 pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n", 769 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, 770 delta); 771 MPTCP_SKB_CB(skb)->offset += delta; 772 MPTCP_SKB_CB(skb)->map_seq += delta; 773 __skb_queue_tail(&sk->sk_receive_queue, skb); 774 } 775 msk->bytes_received += end_seq - msk->ack_seq; 776 WRITE_ONCE(msk->ack_seq, end_seq); 777 moved = true; 778 } 779 return moved; 780 } 781 782 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) 783 { 784 int err = sock_error(ssk); 785 int ssk_state; 786 787 if (!err) 788 return false; 789 790 /* only propagate errors on fallen-back sockets or 791 * on MPC connect 792 */ 793 if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) 794 return false; 795 796 /* We need to propagate only transition to CLOSE state. 797 * Orphaned socket will see such state change via 798 * subflow_sched_work_if_closed() and that path will properly 799 * destroy the msk as needed. 800 */ 801 ssk_state = inet_sk_state_load(ssk); 802 if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) 803 mptcp_set_state(sk, ssk_state); 804 WRITE_ONCE(sk->sk_err, -err); 805 806 /* This barrier is coupled with smp_rmb() in mptcp_poll() */ 807 smp_wmb(); 808 sk_error_report(sk); 809 return true; 810 } 811 812 void __mptcp_error_report(struct sock *sk) 813 { 814 struct mptcp_subflow_context *subflow; 815 struct mptcp_sock *msk = mptcp_sk(sk); 816 817 mptcp_for_each_subflow(msk, subflow) 818 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) 819 break; 820 } 821 822 /* In most cases we will be able to lock the mptcp socket. If its already 823 * owned, we need to defer to the work queue to avoid ABBA deadlock. 824 */ 825 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) 826 { 827 struct sock *sk = (struct sock *)msk; 828 unsigned int moved = 0; 829 830 __mptcp_move_skbs_from_subflow(msk, ssk, &moved); 831 __mptcp_ofo_queue(msk); 832 if (unlikely(ssk->sk_err)) { 833 if (!sock_owned_by_user(sk)) 834 __mptcp_error_report(sk); 835 else 836 __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags); 837 } 838 839 /* If the moves have caught up with the DATA_FIN sequence number 840 * it's time to ack the DATA_FIN and change socket state, but 841 * this is not a good place to change state. Let the workqueue 842 * do it. 843 */ 844 if (mptcp_pending_data_fin(sk, NULL)) 845 mptcp_schedule_work(sk); 846 return moved > 0; 847 } 848 849 void mptcp_data_ready(struct sock *sk, struct sock *ssk) 850 { 851 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 852 struct mptcp_sock *msk = mptcp_sk(sk); 853 int sk_rbuf, ssk_rbuf; 854 855 /* The peer can send data while we are shutting down this 856 * subflow at msk destruction time, but we must avoid enqueuing 857 * more data to the msk receive queue 858 */ 859 if (unlikely(subflow->disposable)) 860 return; 861 862 ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); 863 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); 864 if (unlikely(ssk_rbuf > sk_rbuf)) 865 sk_rbuf = ssk_rbuf; 866 867 /* over limit? can't append more skbs to msk, Also, no need to wake-up*/ 868 if (__mptcp_rmem(sk) > sk_rbuf) 869 return; 870 871 /* Wake-up the reader only for in-sequence data */ 872 mptcp_data_lock(sk); 873 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) 874 sk->sk_data_ready(sk); 875 mptcp_data_unlock(sk); 876 } 877 878 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) 879 { 880 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); 881 WRITE_ONCE(msk->allow_infinite_fallback, false); 882 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); 883 } 884 885 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) 886 { 887 struct sock *sk = (struct sock *)msk; 888 889 if (sk->sk_state != TCP_ESTABLISHED) 890 return false; 891 892 /* attach to msk socket only after we are sure we will deal with it 893 * at close time 894 */ 895 if (sk->sk_socket && !ssk->sk_socket) 896 mptcp_sock_graft(ssk, sk->sk_socket); 897 898 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; 899 mptcp_sockopt_sync_locked(msk, ssk); 900 mptcp_subflow_joined(msk, ssk); 901 mptcp_stop_tout_timer(sk); 902 __mptcp_propagate_sndbuf(sk, ssk); 903 return true; 904 } 905 906 static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list) 907 { 908 struct mptcp_subflow_context *tmp, *subflow; 909 struct mptcp_sock *msk = mptcp_sk(sk); 910 911 list_for_each_entry_safe(subflow, tmp, join_list, node) { 912 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 913 bool slow = lock_sock_fast(ssk); 914 915 list_move_tail(&subflow->node, &msk->conn_list); 916 if (!__mptcp_finish_join(msk, ssk)) 917 mptcp_subflow_reset(ssk); 918 unlock_sock_fast(ssk, slow); 919 } 920 } 921 922 static bool mptcp_rtx_timer_pending(struct sock *sk) 923 { 924 return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); 925 } 926 927 static void mptcp_reset_rtx_timer(struct sock *sk) 928 { 929 struct inet_connection_sock *icsk = inet_csk(sk); 930 unsigned long tout; 931 932 /* prevent rescheduling on close */ 933 if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE)) 934 return; 935 936 tout = mptcp_sk(sk)->timer_ival; 937 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout); 938 } 939 940 bool mptcp_schedule_work(struct sock *sk) 941 { 942 if (inet_sk_state_load(sk) != TCP_CLOSE && 943 schedule_work(&mptcp_sk(sk)->work)) { 944 /* each subflow already holds a reference to the sk, and the 945 * workqueue is invoked by a subflow, so sk can't go away here. 946 */ 947 sock_hold(sk); 948 return true; 949 } 950 return false; 951 } 952 953 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk) 954 { 955 struct mptcp_subflow_context *subflow; 956 957 msk_owned_by_me(msk); 958 959 mptcp_for_each_subflow(msk, subflow) { 960 if (READ_ONCE(subflow->data_avail)) 961 return mptcp_subflow_tcp_sock(subflow); 962 } 963 964 return NULL; 965 } 966 967 static bool mptcp_skb_can_collapse_to(u64 write_seq, 968 const struct sk_buff *skb, 969 const struct mptcp_ext *mpext) 970 { 971 if (!tcp_skb_can_collapse_to(skb)) 972 return false; 973 974 /* can collapse only if MPTCP level sequence is in order and this 975 * mapping has not been xmitted yet 976 */ 977 return mpext && mpext->data_seq + mpext->data_len == write_seq && 978 !mpext->frozen; 979 } 980 981 /* we can append data to the given data frag if: 982 * - there is space available in the backing page_frag 983 * - the data frag tail matches the current page_frag free offset 984 * - the data frag end sequence number matches the current write seq 985 */ 986 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, 987 const struct page_frag *pfrag, 988 const struct mptcp_data_frag *df) 989 { 990 return df && pfrag->page == df->page && 991 pfrag->size - pfrag->offset > 0 && 992 pfrag->offset == (df->offset + df->data_len) && 993 df->data_seq + df->data_len == msk->write_seq; 994 } 995 996 static void dfrag_uncharge(struct sock *sk, int len) 997 { 998 sk_mem_uncharge(sk, len); 999 sk_wmem_queued_add(sk, -len); 1000 } 1001 1002 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) 1003 { 1004 int len = dfrag->data_len + dfrag->overhead; 1005 1006 list_del(&dfrag->list); 1007 dfrag_uncharge(sk, len); 1008 put_page(dfrag->page); 1009 } 1010 1011 /* called under both the msk socket lock and the data lock */ 1012 static void __mptcp_clean_una(struct sock *sk) 1013 { 1014 struct mptcp_sock *msk = mptcp_sk(sk); 1015 struct mptcp_data_frag *dtmp, *dfrag; 1016 u64 snd_una; 1017 1018 snd_una = msk->snd_una; 1019 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { 1020 if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) 1021 break; 1022 1023 if (unlikely(dfrag == msk->first_pending)) { 1024 /* in recovery mode can see ack after the current snd head */ 1025 if (WARN_ON_ONCE(!msk->recovery)) 1026 break; 1027 1028 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1029 } 1030 1031 dfrag_clear(sk, dfrag); 1032 } 1033 1034 dfrag = mptcp_rtx_head(sk); 1035 if (dfrag && after64(snd_una, dfrag->data_seq)) { 1036 u64 delta = snd_una - dfrag->data_seq; 1037 1038 /* prevent wrap around in recovery mode */ 1039 if (unlikely(delta > dfrag->already_sent)) { 1040 if (WARN_ON_ONCE(!msk->recovery)) 1041 goto out; 1042 if (WARN_ON_ONCE(delta > dfrag->data_len)) 1043 goto out; 1044 dfrag->already_sent += delta - dfrag->already_sent; 1045 } 1046 1047 dfrag->data_seq += delta; 1048 dfrag->offset += delta; 1049 dfrag->data_len -= delta; 1050 dfrag->already_sent -= delta; 1051 1052 dfrag_uncharge(sk, delta); 1053 } 1054 1055 /* all retransmitted data acked, recovery completed */ 1056 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) 1057 msk->recovery = false; 1058 1059 out: 1060 if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) { 1061 if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) 1062 mptcp_stop_rtx_timer(sk); 1063 } else { 1064 mptcp_reset_rtx_timer(sk); 1065 } 1066 1067 if (mptcp_pending_data_fin_ack(sk)) 1068 mptcp_schedule_work(sk); 1069 } 1070 1071 static void __mptcp_clean_una_wakeup(struct sock *sk) 1072 { 1073 lockdep_assert_held_once(&sk->sk_lock.slock); 1074 1075 __mptcp_clean_una(sk); 1076 mptcp_write_space(sk); 1077 } 1078 1079 static void mptcp_clean_una_wakeup(struct sock *sk) 1080 { 1081 mptcp_data_lock(sk); 1082 __mptcp_clean_una_wakeup(sk); 1083 mptcp_data_unlock(sk); 1084 } 1085 1086 static void mptcp_enter_memory_pressure(struct sock *sk) 1087 { 1088 struct mptcp_subflow_context *subflow; 1089 struct mptcp_sock *msk = mptcp_sk(sk); 1090 bool first = true; 1091 1092 mptcp_for_each_subflow(msk, subflow) { 1093 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 1094 1095 if (first) 1096 tcp_enter_memory_pressure(ssk); 1097 sk_stream_moderate_sndbuf(ssk); 1098 1099 first = false; 1100 } 1101 __mptcp_sync_sndbuf(sk); 1102 } 1103 1104 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of 1105 * data 1106 */ 1107 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 1108 { 1109 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag), 1110 pfrag, sk->sk_allocation))) 1111 return true; 1112 1113 mptcp_enter_memory_pressure(sk); 1114 return false; 1115 } 1116 1117 static struct mptcp_data_frag * 1118 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, 1119 int orig_offset) 1120 { 1121 int offset = ALIGN(orig_offset, sizeof(long)); 1122 struct mptcp_data_frag *dfrag; 1123 1124 dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset); 1125 dfrag->data_len = 0; 1126 dfrag->data_seq = msk->write_seq; 1127 dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag); 1128 dfrag->offset = offset + sizeof(struct mptcp_data_frag); 1129 dfrag->already_sent = 0; 1130 dfrag->page = pfrag->page; 1131 1132 return dfrag; 1133 } 1134 1135 struct mptcp_sendmsg_info { 1136 int mss_now; 1137 int size_goal; 1138 u16 limit; 1139 u16 sent; 1140 unsigned int flags; 1141 bool data_lock_held; 1142 }; 1143 1144 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, 1145 u64 data_seq, int avail_size) 1146 { 1147 u64 window_end = mptcp_wnd_end(msk); 1148 u64 mptcp_snd_wnd; 1149 1150 if (__mptcp_check_fallback(msk)) 1151 return avail_size; 1152 1153 mptcp_snd_wnd = window_end - data_seq; 1154 avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size); 1155 1156 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { 1157 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); 1158 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED); 1159 } 1160 1161 return avail_size; 1162 } 1163 1164 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp) 1165 { 1166 struct skb_ext *mpext = __skb_ext_alloc(gfp); 1167 1168 if (!mpext) 1169 return false; 1170 __skb_ext_set(skb, SKB_EXT_MPTCP, mpext); 1171 return true; 1172 } 1173 1174 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) 1175 { 1176 struct sk_buff *skb; 1177 1178 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); 1179 if (likely(skb)) { 1180 if (likely(__mptcp_add_ext(skb, gfp))) { 1181 skb_reserve(skb, MAX_TCP_HEADER); 1182 skb->ip_summed = CHECKSUM_PARTIAL; 1183 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 1184 return skb; 1185 } 1186 __kfree_skb(skb); 1187 } else { 1188 mptcp_enter_memory_pressure(sk); 1189 } 1190 return NULL; 1191 } 1192 1193 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) 1194 { 1195 struct sk_buff *skb; 1196 1197 skb = __mptcp_do_alloc_tx_skb(sk, gfp); 1198 if (!skb) 1199 return NULL; 1200 1201 if (likely(sk_wmem_schedule(ssk, skb->truesize))) { 1202 tcp_skb_entail(ssk, skb); 1203 return skb; 1204 } 1205 tcp_skb_tsorted_anchor_cleanup(skb); 1206 kfree_skb(skb); 1207 return NULL; 1208 } 1209 1210 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) 1211 { 1212 gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation; 1213 1214 return __mptcp_alloc_tx_skb(sk, ssk, gfp); 1215 } 1216 1217 /* note: this always recompute the csum on the whole skb, even 1218 * if we just appended a single frag. More status info needed 1219 */ 1220 static void mptcp_update_data_checksum(struct sk_buff *skb, int added) 1221 { 1222 struct mptcp_ext *mpext = mptcp_get_ext(skb); 1223 __wsum csum = ~csum_unfold(mpext->csum); 1224 int offset = skb->len - added; 1225 1226 mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset)); 1227 } 1228 1229 static void mptcp_update_infinite_map(struct mptcp_sock *msk, 1230 struct sock *ssk, 1231 struct mptcp_ext *mpext) 1232 { 1233 if (!mpext) 1234 return; 1235 1236 mpext->infinite_map = 1; 1237 mpext->data_len = 0; 1238 1239 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX); 1240 mptcp_subflow_ctx(ssk)->send_infinite_map = 0; 1241 pr_fallback(msk); 1242 mptcp_do_fallback(ssk); 1243 } 1244 1245 #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1)) 1246 1247 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, 1248 struct mptcp_data_frag *dfrag, 1249 struct mptcp_sendmsg_info *info) 1250 { 1251 u64 data_seq = dfrag->data_seq + info->sent; 1252 int offset = dfrag->offset + info->sent; 1253 struct mptcp_sock *msk = mptcp_sk(sk); 1254 bool zero_window_probe = false; 1255 struct mptcp_ext *mpext = NULL; 1256 bool can_coalesce = false; 1257 bool reuse_skb = true; 1258 struct sk_buff *skb; 1259 size_t copy; 1260 int i; 1261 1262 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n", 1263 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); 1264 1265 if (WARN_ON_ONCE(info->sent > info->limit || 1266 info->limit > dfrag->data_len)) 1267 return 0; 1268 1269 if (unlikely(!__tcp_can_send(ssk))) 1270 return -EAGAIN; 1271 1272 /* compute send limit */ 1273 if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE)) 1274 ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE; 1275 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); 1276 copy = info->size_goal; 1277 1278 skb = tcp_write_queue_tail(ssk); 1279 if (skb && copy > skb->len) { 1280 /* Limit the write to the size available in the 1281 * current skb, if any, so that we create at most a new skb. 1282 * Explicitly tells TCP internals to avoid collapsing on later 1283 * queue management operation, to avoid breaking the ext <-> 1284 * SSN association set here 1285 */ 1286 mpext = mptcp_get_ext(skb); 1287 if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) { 1288 TCP_SKB_CB(skb)->eor = 1; 1289 tcp_mark_push(tcp_sk(ssk), skb); 1290 goto alloc_skb; 1291 } 1292 1293 i = skb_shinfo(skb)->nr_frags; 1294 can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); 1295 if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { 1296 tcp_mark_push(tcp_sk(ssk), skb); 1297 goto alloc_skb; 1298 } 1299 1300 copy -= skb->len; 1301 } else { 1302 alloc_skb: 1303 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); 1304 if (!skb) 1305 return -ENOMEM; 1306 1307 i = skb_shinfo(skb)->nr_frags; 1308 reuse_skb = false; 1309 mpext = mptcp_get_ext(skb); 1310 } 1311 1312 /* Zero window and all data acked? Probe. */ 1313 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); 1314 if (copy == 0) { 1315 u64 snd_una = READ_ONCE(msk->snd_una); 1316 1317 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { 1318 tcp_remove_empty_skb(ssk); 1319 return 0; 1320 } 1321 1322 zero_window_probe = true; 1323 data_seq = snd_una - 1; 1324 copy = 1; 1325 } 1326 1327 copy = min_t(size_t, copy, info->limit - info->sent); 1328 if (!sk_wmem_schedule(ssk, copy)) { 1329 tcp_remove_empty_skb(ssk); 1330 return -ENOMEM; 1331 } 1332 1333 if (can_coalesce) { 1334 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1335 } else { 1336 get_page(dfrag->page); 1337 skb_fill_page_desc(skb, i, dfrag->page, offset, copy); 1338 } 1339 1340 skb->len += copy; 1341 skb->data_len += copy; 1342 skb->truesize += copy; 1343 sk_wmem_queued_add(ssk, copy); 1344 sk_mem_charge(ssk, copy); 1345 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); 1346 TCP_SKB_CB(skb)->end_seq += copy; 1347 tcp_skb_pcount_set(skb, 0); 1348 1349 /* on skb reuse we just need to update the DSS len */ 1350 if (reuse_skb) { 1351 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1352 mpext->data_len += copy; 1353 goto out; 1354 } 1355 1356 memset(mpext, 0, sizeof(*mpext)); 1357 mpext->data_seq = data_seq; 1358 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; 1359 mpext->data_len = copy; 1360 mpext->use_map = 1; 1361 mpext->dsn64 = 1; 1362 1363 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n", 1364 mpext->data_seq, mpext->subflow_seq, mpext->data_len, 1365 mpext->dsn64); 1366 1367 if (zero_window_probe) { 1368 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1369 mpext->frozen = 1; 1370 if (READ_ONCE(msk->csum_enabled)) 1371 mptcp_update_data_checksum(skb, copy); 1372 tcp_push_pending_frames(ssk); 1373 return 0; 1374 } 1375 out: 1376 if (READ_ONCE(msk->csum_enabled)) 1377 mptcp_update_data_checksum(skb, copy); 1378 if (mptcp_subflow_ctx(ssk)->send_infinite_map) 1379 mptcp_update_infinite_map(msk, ssk, mpext); 1380 trace_mptcp_sendmsg_frag(mpext); 1381 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; 1382 return copy; 1383 } 1384 1385 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ 1386 sizeof(struct tcphdr) - \ 1387 MAX_TCP_OPTION_SPACE - \ 1388 sizeof(struct ipv6hdr) - \ 1389 sizeof(struct frag_hdr)) 1390 1391 struct subflow_send_info { 1392 struct sock *ssk; 1393 u64 linger_time; 1394 }; 1395 1396 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) 1397 { 1398 if (!subflow->stale) 1399 return; 1400 1401 subflow->stale = 0; 1402 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); 1403 } 1404 1405 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) 1406 { 1407 if (unlikely(subflow->stale)) { 1408 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); 1409 1410 if (subflow->stale_rcv_tstamp == rcv_tstamp) 1411 return false; 1412 1413 mptcp_subflow_set_active(subflow); 1414 } 1415 return __mptcp_subflow_active(subflow); 1416 } 1417 1418 #define SSK_MODE_ACTIVE 0 1419 #define SSK_MODE_BACKUP 1 1420 #define SSK_MODE_MAX 2 1421 1422 /* implement the mptcp packet scheduler; 1423 * returns the subflow that will transmit the next DSS 1424 * additionally updates the rtx timeout 1425 */ 1426 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) 1427 { 1428 struct subflow_send_info send_info[SSK_MODE_MAX]; 1429 struct mptcp_subflow_context *subflow; 1430 struct sock *sk = (struct sock *)msk; 1431 u32 pace, burst, wmem; 1432 int i, nr_active = 0; 1433 struct sock *ssk; 1434 u64 linger_time; 1435 long tout = 0; 1436 1437 /* pick the subflow with the lower wmem/wspace ratio */ 1438 for (i = 0; i < SSK_MODE_MAX; ++i) { 1439 send_info[i].ssk = NULL; 1440 send_info[i].linger_time = -1; 1441 } 1442 1443 mptcp_for_each_subflow(msk, subflow) { 1444 bool backup = subflow->backup || subflow->request_bkup; 1445 1446 trace_mptcp_subflow_get_send(subflow); 1447 ssk = mptcp_subflow_tcp_sock(subflow); 1448 if (!mptcp_subflow_active(subflow)) 1449 continue; 1450 1451 tout = max(tout, mptcp_timeout_from_subflow(subflow)); 1452 nr_active += !backup; 1453 pace = subflow->avg_pacing_rate; 1454 if (unlikely(!pace)) { 1455 /* init pacing rate from socket */ 1456 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); 1457 pace = subflow->avg_pacing_rate; 1458 if (!pace) 1459 continue; 1460 } 1461 1462 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); 1463 if (linger_time < send_info[backup].linger_time) { 1464 send_info[backup].ssk = ssk; 1465 send_info[backup].linger_time = linger_time; 1466 } 1467 } 1468 __mptcp_set_timeout(sk, tout); 1469 1470 /* pick the best backup if no other subflow is active */ 1471 if (!nr_active) 1472 send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; 1473 1474 /* According to the blest algorithm, to avoid HoL blocking for the 1475 * faster flow, we need to: 1476 * - estimate the faster flow linger time 1477 * - use the above to estimate the amount of byte transferred 1478 * by the faster flow 1479 * - check that the amount of queued data is greter than the above, 1480 * otherwise do not use the picked, slower, subflow 1481 * We select the subflow with the shorter estimated time to flush 1482 * the queued mem, which basically ensure the above. We just need 1483 * to check that subflow has a non empty cwin. 1484 */ 1485 ssk = send_info[SSK_MODE_ACTIVE].ssk; 1486 if (!ssk || !sk_stream_memory_free(ssk)) 1487 return NULL; 1488 1489 burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); 1490 wmem = READ_ONCE(ssk->sk_wmem_queued); 1491 if (!burst) 1492 return ssk; 1493 1494 subflow = mptcp_subflow_ctx(ssk); 1495 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + 1496 READ_ONCE(ssk->sk_pacing_rate) * burst, 1497 burst + wmem); 1498 msk->snd_burst = burst; 1499 return ssk; 1500 } 1501 1502 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) 1503 { 1504 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); 1505 release_sock(ssk); 1506 } 1507 1508 static void mptcp_update_post_push(struct mptcp_sock *msk, 1509 struct mptcp_data_frag *dfrag, 1510 u32 sent) 1511 { 1512 u64 snd_nxt_new = dfrag->data_seq; 1513 1514 dfrag->already_sent += sent; 1515 1516 msk->snd_burst -= sent; 1517 1518 snd_nxt_new += dfrag->already_sent; 1519 1520 /* snd_nxt_new can be smaller than snd_nxt in case mptcp 1521 * is recovering after a failover. In that event, this re-sends 1522 * old segments. 1523 * 1524 * Thus compute snd_nxt_new candidate based on 1525 * the dfrag->data_seq that was sent and the data 1526 * that has been handed to the subflow for transmission 1527 * and skip update in case it was old dfrag. 1528 */ 1529 if (likely(after64(snd_nxt_new, msk->snd_nxt))) { 1530 msk->bytes_sent += snd_nxt_new - msk->snd_nxt; 1531 WRITE_ONCE(msk->snd_nxt, snd_nxt_new); 1532 } 1533 } 1534 1535 void mptcp_check_and_set_pending(struct sock *sk) 1536 { 1537 if (mptcp_send_head(sk)) { 1538 mptcp_data_lock(sk); 1539 mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING); 1540 mptcp_data_unlock(sk); 1541 } 1542 } 1543 1544 static int __subflow_push_pending(struct sock *sk, struct sock *ssk, 1545 struct mptcp_sendmsg_info *info) 1546 { 1547 struct mptcp_sock *msk = mptcp_sk(sk); 1548 struct mptcp_data_frag *dfrag; 1549 int len, copied = 0, err = 0; 1550 1551 while ((dfrag = mptcp_send_head(sk))) { 1552 info->sent = dfrag->already_sent; 1553 info->limit = dfrag->data_len; 1554 len = dfrag->data_len - dfrag->already_sent; 1555 while (len > 0) { 1556 int ret = 0; 1557 1558 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info); 1559 if (ret <= 0) { 1560 err = copied ? : ret; 1561 goto out; 1562 } 1563 1564 info->sent += ret; 1565 copied += ret; 1566 len -= ret; 1567 1568 mptcp_update_post_push(msk, dfrag, ret); 1569 } 1570 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); 1571 1572 if (msk->snd_burst <= 0 || 1573 !sk_stream_memory_free(ssk) || 1574 !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) { 1575 err = copied; 1576 goto out; 1577 } 1578 mptcp_set_timeout(sk); 1579 } 1580 err = copied; 1581 1582 out: 1583 if (err > 0) 1584 msk->last_data_sent = tcp_jiffies32; 1585 return err; 1586 } 1587 1588 void __mptcp_push_pending(struct sock *sk, unsigned int flags) 1589 { 1590 struct sock *prev_ssk = NULL, *ssk = NULL; 1591 struct mptcp_sock *msk = mptcp_sk(sk); 1592 struct mptcp_sendmsg_info info = { 1593 .flags = flags, 1594 }; 1595 bool do_check_data_fin = false; 1596 int push_count = 1; 1597 1598 while (mptcp_send_head(sk) && (push_count > 0)) { 1599 struct mptcp_subflow_context *subflow; 1600 int ret = 0; 1601 1602 if (mptcp_sched_get_send(msk)) 1603 break; 1604 1605 push_count = 0; 1606 1607 mptcp_for_each_subflow(msk, subflow) { 1608 if (READ_ONCE(subflow->scheduled)) { 1609 mptcp_subflow_set_scheduled(subflow, false); 1610 1611 prev_ssk = ssk; 1612 ssk = mptcp_subflow_tcp_sock(subflow); 1613 if (ssk != prev_ssk) { 1614 /* First check. If the ssk has changed since 1615 * the last round, release prev_ssk 1616 */ 1617 if (prev_ssk) 1618 mptcp_push_release(prev_ssk, &info); 1619 1620 /* Need to lock the new subflow only if different 1621 * from the previous one, otherwise we are still 1622 * helding the relevant lock 1623 */ 1624 lock_sock(ssk); 1625 } 1626 1627 push_count++; 1628 1629 ret = __subflow_push_pending(sk, ssk, &info); 1630 if (ret <= 0) { 1631 if (ret != -EAGAIN || 1632 (1 << ssk->sk_state) & 1633 (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE)) 1634 push_count--; 1635 continue; 1636 } 1637 do_check_data_fin = true; 1638 } 1639 } 1640 } 1641 1642 /* at this point we held the socket lock for the last subflow we used */ 1643 if (ssk) 1644 mptcp_push_release(ssk, &info); 1645 1646 /* ensure the rtx timer is running */ 1647 if (!mptcp_rtx_timer_pending(sk)) 1648 mptcp_reset_rtx_timer(sk); 1649 if (do_check_data_fin) 1650 mptcp_check_send_data_fin(sk); 1651 } 1652 1653 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) 1654 { 1655 struct mptcp_sock *msk = mptcp_sk(sk); 1656 struct mptcp_sendmsg_info info = { 1657 .data_lock_held = true, 1658 }; 1659 bool keep_pushing = true; 1660 struct sock *xmit_ssk; 1661 int copied = 0; 1662 1663 info.flags = 0; 1664 while (mptcp_send_head(sk) && keep_pushing) { 1665 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 1666 int ret = 0; 1667 1668 /* check for a different subflow usage only after 1669 * spooling the first chunk of data 1670 */ 1671 if (first) { 1672 mptcp_subflow_set_scheduled(subflow, false); 1673 ret = __subflow_push_pending(sk, ssk, &info); 1674 first = false; 1675 if (ret <= 0) 1676 break; 1677 copied += ret; 1678 continue; 1679 } 1680 1681 if (mptcp_sched_get_send(msk)) 1682 goto out; 1683 1684 if (READ_ONCE(subflow->scheduled)) { 1685 mptcp_subflow_set_scheduled(subflow, false); 1686 ret = __subflow_push_pending(sk, ssk, &info); 1687 if (ret <= 0) 1688 keep_pushing = false; 1689 copied += ret; 1690 } 1691 1692 mptcp_for_each_subflow(msk, subflow) { 1693 if (READ_ONCE(subflow->scheduled)) { 1694 xmit_ssk = mptcp_subflow_tcp_sock(subflow); 1695 if (xmit_ssk != ssk) { 1696 mptcp_subflow_delegate(subflow, 1697 MPTCP_DELEGATE_SEND); 1698 keep_pushing = false; 1699 } 1700 } 1701 } 1702 } 1703 1704 out: 1705 /* __mptcp_alloc_tx_skb could have released some wmem and we are 1706 * not going to flush it via release_sock() 1707 */ 1708 if (copied) { 1709 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, 1710 info.size_goal); 1711 if (!mptcp_rtx_timer_pending(sk)) 1712 mptcp_reset_rtx_timer(sk); 1713 1714 if (msk->snd_data_fin_enable && 1715 msk->snd_nxt + 1 == msk->write_seq) 1716 mptcp_schedule_work(sk); 1717 } 1718 } 1719 1720 static int mptcp_disconnect(struct sock *sk, int flags); 1721 1722 static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, 1723 size_t len, int *copied_syn) 1724 { 1725 unsigned int saved_flags = msg->msg_flags; 1726 struct mptcp_sock *msk = mptcp_sk(sk); 1727 struct sock *ssk; 1728 int ret; 1729 1730 /* on flags based fastopen the mptcp is supposed to create the 1731 * first subflow right now. Otherwise we are in the defer_connect 1732 * path, and the first subflow must be already present. 1733 * Since the defer_connect flag is cleared after the first succsful 1734 * fastopen attempt, no need to check for additional subflow status. 1735 */ 1736 if (msg->msg_flags & MSG_FASTOPEN) { 1737 ssk = __mptcp_nmpc_sk(msk); 1738 if (IS_ERR(ssk)) 1739 return PTR_ERR(ssk); 1740 } 1741 if (!msk->first) 1742 return -EINVAL; 1743 1744 ssk = msk->first; 1745 1746 lock_sock(ssk); 1747 msg->msg_flags |= MSG_DONTWAIT; 1748 msk->fastopening = 1; 1749 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); 1750 msk->fastopening = 0; 1751 msg->msg_flags = saved_flags; 1752 release_sock(ssk); 1753 1754 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */ 1755 if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) { 1756 ret = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1757 msg->msg_namelen, msg->msg_flags, 1); 1758 1759 /* Keep the same behaviour of plain TCP: zero the copied bytes in 1760 * case of any error, except timeout or signal 1761 */ 1762 if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR) 1763 *copied_syn = 0; 1764 } else if (ret && ret != -EINPROGRESS) { 1765 /* The disconnect() op called by tcp_sendmsg_fastopen()/ 1766 * __inet_stream_connect() can fail, due to looking check, 1767 * see mptcp_disconnect(). 1768 * Attempt it again outside the problematic scope. 1769 */ 1770 if (!mptcp_disconnect(sk, 0)) 1771 sk->sk_socket->state = SS_UNCONNECTED; 1772 } 1773 inet_clear_bit(DEFER_CONNECT, sk); 1774 1775 return ret; 1776 } 1777 1778 static int do_copy_data_nocache(struct sock *sk, int copy, 1779 struct iov_iter *from, char *to) 1780 { 1781 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { 1782 if (!copy_from_iter_full_nocache(to, copy, from)) 1783 return -EFAULT; 1784 } else if (!copy_from_iter_full(to, copy, from)) { 1785 return -EFAULT; 1786 } 1787 return 0; 1788 } 1789 1790 /* open-code sk_stream_memory_free() plus sent limit computation to 1791 * avoid indirect calls in fast-path. 1792 * Called under the msk socket lock, so we can avoid a bunch of ONCE 1793 * annotations. 1794 */ 1795 static u32 mptcp_send_limit(const struct sock *sk) 1796 { 1797 const struct mptcp_sock *msk = mptcp_sk(sk); 1798 u32 limit, not_sent; 1799 1800 if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf)) 1801 return 0; 1802 1803 limit = mptcp_notsent_lowat(sk); 1804 if (limit == UINT_MAX) 1805 return UINT_MAX; 1806 1807 not_sent = msk->write_seq - msk->snd_nxt; 1808 if (not_sent >= limit) 1809 return 0; 1810 1811 return limit - not_sent; 1812 } 1813 1814 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1815 { 1816 struct mptcp_sock *msk = mptcp_sk(sk); 1817 struct page_frag *pfrag; 1818 size_t copied = 0; 1819 int ret = 0; 1820 long timeo; 1821 1822 /* silently ignore everything else */ 1823 msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_FASTOPEN; 1824 1825 lock_sock(sk); 1826 1827 if (unlikely(inet_test_bit(DEFER_CONNECT, sk) || 1828 msg->msg_flags & MSG_FASTOPEN)) { 1829 int copied_syn = 0; 1830 1831 ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn); 1832 copied += copied_syn; 1833 if (ret == -EINPROGRESS && copied_syn > 0) 1834 goto out; 1835 else if (ret) 1836 goto do_error; 1837 } 1838 1839 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1840 1841 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { 1842 ret = sk_stream_wait_connect(sk, &timeo); 1843 if (ret) 1844 goto do_error; 1845 } 1846 1847 ret = -EPIPE; 1848 if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))) 1849 goto do_error; 1850 1851 pfrag = sk_page_frag(sk); 1852 1853 while (msg_data_left(msg)) { 1854 int total_ts, frag_truesize = 0; 1855 struct mptcp_data_frag *dfrag; 1856 bool dfrag_collapsed; 1857 size_t psize, offset; 1858 u32 copy_limit; 1859 1860 /* ensure fitting the notsent_lowat() constraint */ 1861 copy_limit = mptcp_send_limit(sk); 1862 if (!copy_limit) 1863 goto wait_for_memory; 1864 1865 /* reuse tail pfrag, if possible, or carve a new one from the 1866 * page allocator 1867 */ 1868 dfrag = mptcp_pending_tail(sk); 1869 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); 1870 if (!dfrag_collapsed) { 1871 if (!mptcp_page_frag_refill(sk, pfrag)) 1872 goto wait_for_memory; 1873 1874 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); 1875 frag_truesize = dfrag->overhead; 1876 } 1877 1878 /* we do not bound vs wspace, to allow a single packet. 1879 * memory accounting will prevent execessive memory usage 1880 * anyway 1881 */ 1882 offset = dfrag->offset + dfrag->data_len; 1883 psize = pfrag->size - offset; 1884 psize = min_t(size_t, psize, msg_data_left(msg)); 1885 psize = min_t(size_t, psize, copy_limit); 1886 total_ts = psize + frag_truesize; 1887 1888 if (!sk_wmem_schedule(sk, total_ts)) 1889 goto wait_for_memory; 1890 1891 ret = do_copy_data_nocache(sk, psize, &msg->msg_iter, 1892 page_address(dfrag->page) + offset); 1893 if (ret) 1894 goto do_error; 1895 1896 /* data successfully copied into the write queue */ 1897 sk_forward_alloc_add(sk, -total_ts); 1898 copied += psize; 1899 dfrag->data_len += psize; 1900 frag_truesize += psize; 1901 pfrag->offset += frag_truesize; 1902 WRITE_ONCE(msk->write_seq, msk->write_seq + psize); 1903 1904 /* charge data on mptcp pending queue to the msk socket 1905 * Note: we charge such data both to sk and ssk 1906 */ 1907 sk_wmem_queued_add(sk, frag_truesize); 1908 if (!dfrag_collapsed) { 1909 get_page(dfrag->page); 1910 list_add_tail(&dfrag->list, &msk->rtx_queue); 1911 if (!msk->first_pending) 1912 WRITE_ONCE(msk->first_pending, dfrag); 1913 } 1914 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, 1915 dfrag->data_seq, dfrag->data_len, dfrag->already_sent, 1916 !dfrag_collapsed); 1917 1918 continue; 1919 1920 wait_for_memory: 1921 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1922 __mptcp_push_pending(sk, msg->msg_flags); 1923 ret = sk_stream_wait_memory(sk, &timeo); 1924 if (ret) 1925 goto do_error; 1926 } 1927 1928 if (copied) 1929 __mptcp_push_pending(sk, msg->msg_flags); 1930 1931 out: 1932 release_sock(sk); 1933 return copied; 1934 1935 do_error: 1936 if (copied) 1937 goto out; 1938 1939 copied = sk_stream_error(sk, msg->msg_flags, ret); 1940 goto out; 1941 } 1942 1943 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); 1944 1945 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, 1946 struct msghdr *msg, 1947 size_t len, int flags, 1948 struct scm_timestamping_internal *tss, 1949 int *cmsg_flags) 1950 { 1951 struct sk_buff *skb, *tmp; 1952 int copied = 0; 1953 1954 skb_queue_walk_safe(&msk->receive_queue, skb, tmp) { 1955 u32 offset = MPTCP_SKB_CB(skb)->offset; 1956 u32 data_len = skb->len - offset; 1957 u32 count = min_t(size_t, len - copied, data_len); 1958 int err; 1959 1960 if (!(flags & MSG_TRUNC)) { 1961 err = skb_copy_datagram_msg(skb, offset, msg, count); 1962 if (unlikely(err < 0)) { 1963 if (!copied) 1964 return err; 1965 break; 1966 } 1967 } 1968 1969 if (MPTCP_SKB_CB(skb)->has_rxtstamp) { 1970 tcp_update_recv_tstamps(skb, tss); 1971 *cmsg_flags |= MPTCP_CMSG_TS; 1972 } 1973 1974 copied += count; 1975 1976 if (count < data_len) { 1977 if (!(flags & MSG_PEEK)) { 1978 MPTCP_SKB_CB(skb)->offset += count; 1979 MPTCP_SKB_CB(skb)->map_seq += count; 1980 msk->bytes_consumed += count; 1981 } 1982 break; 1983 } 1984 1985 if (!(flags & MSG_PEEK)) { 1986 /* we will bulk release the skb memory later */ 1987 skb->destructor = NULL; 1988 WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); 1989 __skb_unlink(skb, &msk->receive_queue); 1990 __kfree_skb(skb); 1991 msk->bytes_consumed += count; 1992 } 1993 1994 if (copied >= len) 1995 break; 1996 } 1997 1998 mptcp_rcv_space_adjust(msk, copied); 1999 return copied; 2000 } 2001 2002 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information. 2003 * 2004 * Only difference: Use highest rtt estimate of the subflows in use. 2005 */ 2006 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) 2007 { 2008 struct mptcp_subflow_context *subflow; 2009 struct sock *sk = (struct sock *)msk; 2010 u8 scaling_ratio = U8_MAX; 2011 u32 time, advmss = 1; 2012 u64 rtt_us, mstamp; 2013 2014 msk_owned_by_me(msk); 2015 2016 if (copied <= 0) 2017 return; 2018 2019 if (!msk->rcvspace_init) 2020 mptcp_rcv_space_init(msk, msk->first); 2021 2022 msk->rcvq_space.copied += copied; 2023 2024 mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); 2025 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); 2026 2027 rtt_us = msk->rcvq_space.rtt_us; 2028 if (rtt_us && time < (rtt_us >> 3)) 2029 return; 2030 2031 rtt_us = 0; 2032 mptcp_for_each_subflow(msk, subflow) { 2033 const struct tcp_sock *tp; 2034 u64 sf_rtt_us; 2035 u32 sf_advmss; 2036 2037 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); 2038 2039 sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); 2040 sf_advmss = READ_ONCE(tp->advmss); 2041 2042 rtt_us = max(sf_rtt_us, rtt_us); 2043 advmss = max(sf_advmss, advmss); 2044 scaling_ratio = min(tp->scaling_ratio, scaling_ratio); 2045 } 2046 2047 msk->rcvq_space.rtt_us = rtt_us; 2048 msk->scaling_ratio = scaling_ratio; 2049 if (time < (rtt_us >> 3) || rtt_us == 0) 2050 return; 2051 2052 if (msk->rcvq_space.copied <= msk->rcvq_space.space) 2053 goto new_measure; 2054 2055 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && 2056 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 2057 u64 rcvwin, grow; 2058 int rcvbuf; 2059 2060 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; 2061 2062 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); 2063 2064 do_div(grow, msk->rcvq_space.space); 2065 rcvwin += (grow << 1); 2066 2067 rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin), 2068 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); 2069 2070 if (rcvbuf > sk->sk_rcvbuf) { 2071 u32 window_clamp; 2072 2073 window_clamp = mptcp_win_from_space(sk, rcvbuf); 2074 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); 2075 2076 /* Make subflows follow along. If we do not do this, we 2077 * get drops at subflow level if skbs can't be moved to 2078 * the mptcp rx queue fast enough (announced rcv_win can 2079 * exceed ssk->sk_rcvbuf). 2080 */ 2081 mptcp_for_each_subflow(msk, subflow) { 2082 struct sock *ssk; 2083 bool slow; 2084 2085 ssk = mptcp_subflow_tcp_sock(subflow); 2086 slow = lock_sock_fast(ssk); 2087 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); 2088 WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp); 2089 if (tcp_can_send_ack(ssk)) 2090 tcp_cleanup_rbuf(ssk, 1); 2091 unlock_sock_fast(ssk, slow); 2092 } 2093 } 2094 } 2095 2096 msk->rcvq_space.space = msk->rcvq_space.copied; 2097 new_measure: 2098 msk->rcvq_space.copied = 0; 2099 msk->rcvq_space.time = mstamp; 2100 } 2101 2102 static void __mptcp_update_rmem(struct sock *sk) 2103 { 2104 struct mptcp_sock *msk = mptcp_sk(sk); 2105 2106 if (!msk->rmem_released) 2107 return; 2108 2109 atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); 2110 mptcp_rmem_uncharge(sk, msk->rmem_released); 2111 WRITE_ONCE(msk->rmem_released, 0); 2112 } 2113 2114 static void __mptcp_splice_receive_queue(struct sock *sk) 2115 { 2116 struct mptcp_sock *msk = mptcp_sk(sk); 2117 2118 skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue); 2119 } 2120 2121 static bool __mptcp_move_skbs(struct mptcp_sock *msk) 2122 { 2123 struct sock *sk = (struct sock *)msk; 2124 unsigned int moved = 0; 2125 bool ret, done; 2126 2127 do { 2128 struct sock *ssk = mptcp_subflow_recv_lookup(msk); 2129 bool slowpath; 2130 2131 /* we can have data pending in the subflows only if the msk 2132 * receive buffer was full at subflow_data_ready() time, 2133 * that is an unlikely slow path. 2134 */ 2135 if (likely(!ssk)) 2136 break; 2137 2138 slowpath = lock_sock_fast(ssk); 2139 mptcp_data_lock(sk); 2140 __mptcp_update_rmem(sk); 2141 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); 2142 mptcp_data_unlock(sk); 2143 2144 if (unlikely(ssk->sk_err)) 2145 __mptcp_error_report(sk); 2146 unlock_sock_fast(ssk, slowpath); 2147 } while (!done); 2148 2149 /* acquire the data lock only if some input data is pending */ 2150 ret = moved > 0; 2151 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) || 2152 !skb_queue_empty_lockless(&sk->sk_receive_queue)) { 2153 mptcp_data_lock(sk); 2154 __mptcp_update_rmem(sk); 2155 ret |= __mptcp_ofo_queue(msk); 2156 __mptcp_splice_receive_queue(sk); 2157 mptcp_data_unlock(sk); 2158 } 2159 if (ret) 2160 mptcp_check_data_fin((struct sock *)msk); 2161 return !skb_queue_empty(&msk->receive_queue); 2162 } 2163 2164 static unsigned int mptcp_inq_hint(const struct sock *sk) 2165 { 2166 const struct mptcp_sock *msk = mptcp_sk(sk); 2167 const struct sk_buff *skb; 2168 2169 skb = skb_peek(&msk->receive_queue); 2170 if (skb) { 2171 u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; 2172 2173 if (hint_val >= INT_MAX) 2174 return INT_MAX; 2175 2176 return (unsigned int)hint_val; 2177 } 2178 2179 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 2180 return 1; 2181 2182 return 0; 2183 } 2184 2185 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2186 int flags, int *addr_len) 2187 { 2188 struct mptcp_sock *msk = mptcp_sk(sk); 2189 struct scm_timestamping_internal tss; 2190 int copied = 0, cmsg_flags = 0; 2191 int target; 2192 long timeo; 2193 2194 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */ 2195 if (unlikely(flags & MSG_ERRQUEUE)) 2196 return inet_recv_error(sk, msg, len, addr_len); 2197 2198 lock_sock(sk); 2199 if (unlikely(sk->sk_state == TCP_LISTEN)) { 2200 copied = -ENOTCONN; 2201 goto out_err; 2202 } 2203 2204 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2205 2206 len = min_t(size_t, len, INT_MAX); 2207 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2208 2209 if (unlikely(msk->recvmsg_inq)) 2210 cmsg_flags = MPTCP_CMSG_INQ; 2211 2212 while (copied < len) { 2213 int err, bytes_read; 2214 2215 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); 2216 if (unlikely(bytes_read < 0)) { 2217 if (!copied) 2218 copied = bytes_read; 2219 goto out_err; 2220 } 2221 2222 copied += bytes_read; 2223 2224 if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk)) 2225 continue; 2226 2227 /* only the MPTCP socket status is relevant here. The exit 2228 * conditions mirror closely tcp_recvmsg() 2229 */ 2230 if (copied >= target) 2231 break; 2232 2233 if (copied) { 2234 if (sk->sk_err || 2235 sk->sk_state == TCP_CLOSE || 2236 (sk->sk_shutdown & RCV_SHUTDOWN) || 2237 !timeo || 2238 signal_pending(current)) 2239 break; 2240 } else { 2241 if (sk->sk_err) { 2242 copied = sock_error(sk); 2243 break; 2244 } 2245 2246 if (sk->sk_shutdown & RCV_SHUTDOWN) { 2247 /* race breaker: the shutdown could be after the 2248 * previous receive queue check 2249 */ 2250 if (__mptcp_move_skbs(msk)) 2251 continue; 2252 break; 2253 } 2254 2255 if (sk->sk_state == TCP_CLOSE) { 2256 copied = -ENOTCONN; 2257 break; 2258 } 2259 2260 if (!timeo) { 2261 copied = -EAGAIN; 2262 break; 2263 } 2264 2265 if (signal_pending(current)) { 2266 copied = sock_intr_errno(timeo); 2267 break; 2268 } 2269 } 2270 2271 pr_debug("block timeout %ld\n", timeo); 2272 mptcp_cleanup_rbuf(msk, copied); 2273 err = sk_wait_data(sk, &timeo, NULL); 2274 if (err < 0) { 2275 err = copied ? : err; 2276 goto out_err; 2277 } 2278 } 2279 2280 mptcp_cleanup_rbuf(msk, copied); 2281 2282 out_err: 2283 if (cmsg_flags && copied >= 0) { 2284 if (cmsg_flags & MPTCP_CMSG_TS) 2285 tcp_recv_timestamp(msg, sk, &tss); 2286 2287 if (cmsg_flags & MPTCP_CMSG_INQ) { 2288 unsigned int inq = mptcp_inq_hint(sk); 2289 2290 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); 2291 } 2292 } 2293 2294 pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n", 2295 msk, skb_queue_empty_lockless(&sk->sk_receive_queue), 2296 skb_queue_empty(&msk->receive_queue), copied); 2297 2298 release_sock(sk); 2299 return copied; 2300 } 2301 2302 static void mptcp_retransmit_timer(struct timer_list *t) 2303 { 2304 struct inet_connection_sock *icsk = from_timer(icsk, t, 2305 icsk_retransmit_timer); 2306 struct sock *sk = &icsk->icsk_inet.sk; 2307 struct mptcp_sock *msk = mptcp_sk(sk); 2308 2309 bh_lock_sock(sk); 2310 if (!sock_owned_by_user(sk)) { 2311 /* we need a process context to retransmit */ 2312 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) 2313 mptcp_schedule_work(sk); 2314 } else { 2315 /* delegate our work to tcp_release_cb() */ 2316 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); 2317 } 2318 bh_unlock_sock(sk); 2319 sock_put(sk); 2320 } 2321 2322 static void mptcp_tout_timer(struct timer_list *t) 2323 { 2324 struct sock *sk = from_timer(sk, t, sk_timer); 2325 2326 mptcp_schedule_work(sk); 2327 sock_put(sk); 2328 } 2329 2330 /* Find an idle subflow. Return NULL if there is unacked data at tcp 2331 * level. 2332 * 2333 * A backup subflow is returned only if that is the only kind available. 2334 */ 2335 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) 2336 { 2337 struct sock *backup = NULL, *pick = NULL; 2338 struct mptcp_subflow_context *subflow; 2339 int min_stale_count = INT_MAX; 2340 2341 mptcp_for_each_subflow(msk, subflow) { 2342 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2343 2344 if (!__mptcp_subflow_active(subflow)) 2345 continue; 2346 2347 /* still data outstanding at TCP level? skip this */ 2348 if (!tcp_rtx_and_write_queues_empty(ssk)) { 2349 mptcp_pm_subflow_chk_stale(msk, ssk); 2350 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); 2351 continue; 2352 } 2353 2354 if (subflow->backup || subflow->request_bkup) { 2355 if (!backup) 2356 backup = ssk; 2357 continue; 2358 } 2359 2360 if (!pick) 2361 pick = ssk; 2362 } 2363 2364 if (pick) 2365 return pick; 2366 2367 /* use backup only if there are no progresses anywhere */ 2368 return min_stale_count > 1 ? backup : NULL; 2369 } 2370 2371 bool __mptcp_retransmit_pending_data(struct sock *sk) 2372 { 2373 struct mptcp_data_frag *cur, *rtx_head; 2374 struct mptcp_sock *msk = mptcp_sk(sk); 2375 2376 if (__mptcp_check_fallback(msk)) 2377 return false; 2378 2379 /* the closing socket has some data untransmitted and/or unacked: 2380 * some data in the mptcp rtx queue has not really xmitted yet. 2381 * keep it simple and re-inject the whole mptcp level rtx queue 2382 */ 2383 mptcp_data_lock(sk); 2384 __mptcp_clean_una_wakeup(sk); 2385 rtx_head = mptcp_rtx_head(sk); 2386 if (!rtx_head) { 2387 mptcp_data_unlock(sk); 2388 return false; 2389 } 2390 2391 msk->recovery_snd_nxt = msk->snd_nxt; 2392 msk->recovery = true; 2393 mptcp_data_unlock(sk); 2394 2395 msk->first_pending = rtx_head; 2396 msk->snd_burst = 0; 2397 2398 /* be sure to clear the "sent status" on all re-injected fragments */ 2399 list_for_each_entry(cur, &msk->rtx_queue, list) { 2400 if (!cur->already_sent) 2401 break; 2402 cur->already_sent = 0; 2403 } 2404 2405 return true; 2406 } 2407 2408 /* flags for __mptcp_close_ssk() */ 2409 #define MPTCP_CF_PUSH BIT(1) 2410 #define MPTCP_CF_FASTCLOSE BIT(2) 2411 2412 /* be sure to send a reset only if the caller asked for it, also 2413 * clean completely the subflow status when the subflow reaches 2414 * TCP_CLOSE state 2415 */ 2416 static void __mptcp_subflow_disconnect(struct sock *ssk, 2417 struct mptcp_subflow_context *subflow, 2418 unsigned int flags) 2419 { 2420 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || 2421 (flags & MPTCP_CF_FASTCLOSE)) { 2422 /* The MPTCP code never wait on the subflow sockets, TCP-level 2423 * disconnect should never fail 2424 */ 2425 WARN_ON_ONCE(tcp_disconnect(ssk, 0)); 2426 mptcp_subflow_ctx_reset(subflow); 2427 } else { 2428 tcp_shutdown(ssk, SEND_SHUTDOWN); 2429 } 2430 } 2431 2432 /* subflow sockets can be either outgoing (connect) or incoming 2433 * (accept). 2434 * 2435 * Outgoing subflows use in-kernel sockets. 2436 * Incoming subflows do not have their own 'struct socket' allocated, 2437 * so we need to use tcp_close() after detaching them from the mptcp 2438 * parent socket. 2439 */ 2440 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2441 struct mptcp_subflow_context *subflow, 2442 unsigned int flags) 2443 { 2444 struct mptcp_sock *msk = mptcp_sk(sk); 2445 bool dispose_it, need_push = false; 2446 2447 /* If the first subflow moved to a close state before accept, e.g. due 2448 * to an incoming reset or listener shutdown, the subflow socket is 2449 * already deleted by inet_child_forget() and the mptcp socket can't 2450 * survive too. 2451 */ 2452 if (msk->in_accept_queue && msk->first == ssk && 2453 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) { 2454 /* ensure later check in mptcp_worker() will dispose the msk */ 2455 sock_set_flag(sk, SOCK_DEAD); 2456 mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1)); 2457 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 2458 mptcp_subflow_drop_ctx(ssk); 2459 goto out_release; 2460 } 2461 2462 dispose_it = msk->free_first || ssk != msk->first; 2463 if (dispose_it) 2464 list_del(&subflow->node); 2465 2466 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 2467 2468 if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) { 2469 /* be sure to force the tcp_close path 2470 * to generate the egress reset 2471 */ 2472 ssk->sk_lingertime = 0; 2473 sock_set_flag(ssk, SOCK_LINGER); 2474 subflow->send_fastclose = 1; 2475 } 2476 2477 need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); 2478 if (!dispose_it) { 2479 __mptcp_subflow_disconnect(ssk, subflow, flags); 2480 release_sock(ssk); 2481 2482 goto out; 2483 } 2484 2485 subflow->disposable = 1; 2486 2487 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops 2488 * the ssk has been already destroyed, we just need to release the 2489 * reference owned by msk; 2490 */ 2491 if (!inet_csk(ssk)->icsk_ulp_ops) { 2492 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); 2493 kfree_rcu(subflow, rcu); 2494 } else { 2495 /* otherwise tcp will dispose of the ssk and subflow ctx */ 2496 __tcp_close(ssk, 0); 2497 2498 /* close acquired an extra ref */ 2499 __sock_put(ssk); 2500 } 2501 2502 out_release: 2503 __mptcp_subflow_error_report(sk, ssk); 2504 release_sock(ssk); 2505 2506 sock_put(ssk); 2507 2508 if (ssk == msk->first) 2509 WRITE_ONCE(msk->first, NULL); 2510 2511 out: 2512 __mptcp_sync_sndbuf(sk); 2513 if (need_push) 2514 __mptcp_push_pending(sk, 0); 2515 2516 /* Catch every 'all subflows closed' scenario, including peers silently 2517 * closing them, e.g. due to timeout. 2518 * For established sockets, allow an additional timeout before closing, 2519 * as the protocol can still create more subflows. 2520 */ 2521 if (list_is_singular(&msk->conn_list) && msk->first && 2522 inet_sk_state_load(msk->first) == TCP_CLOSE) { 2523 if (sk->sk_state != TCP_ESTABLISHED || 2524 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { 2525 mptcp_set_state(sk, TCP_CLOSE); 2526 mptcp_close_wake_up(sk); 2527 } else { 2528 mptcp_start_tout_timer(sk); 2529 } 2530 } 2531 } 2532 2533 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2534 struct mptcp_subflow_context *subflow) 2535 { 2536 /* The first subflow can already be closed and still in the list */ 2537 if (subflow->close_event_done) 2538 return; 2539 2540 subflow->close_event_done = true; 2541 2542 if (sk->sk_state == TCP_ESTABLISHED) 2543 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); 2544 2545 /* subflow aborted before reaching the fully_established status 2546 * attempt the creation of the next subflow 2547 */ 2548 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow); 2549 2550 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); 2551 } 2552 2553 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) 2554 { 2555 return 0; 2556 } 2557 2558 static void __mptcp_close_subflow(struct sock *sk) 2559 { 2560 struct mptcp_subflow_context *subflow, *tmp; 2561 struct mptcp_sock *msk = mptcp_sk(sk); 2562 2563 might_sleep(); 2564 2565 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 2566 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2567 int ssk_state = inet_sk_state_load(ssk); 2568 2569 if (ssk_state != TCP_CLOSE && 2570 (ssk_state != TCP_CLOSE_WAIT || 2571 inet_sk_state_load(sk) != TCP_ESTABLISHED)) 2572 continue; 2573 2574 /* 'subflow_data_ready' will re-sched once rx queue is empty */ 2575 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) 2576 continue; 2577 2578 mptcp_close_ssk(sk, ssk, subflow); 2579 } 2580 2581 } 2582 2583 static bool mptcp_close_tout_expired(const struct sock *sk) 2584 { 2585 if (!inet_csk(sk)->icsk_mtup.probe_timestamp || 2586 sk->sk_state == TCP_CLOSE) 2587 return false; 2588 2589 return time_after32(tcp_jiffies32, 2590 inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk)); 2591 } 2592 2593 static void mptcp_check_fastclose(struct mptcp_sock *msk) 2594 { 2595 struct mptcp_subflow_context *subflow, *tmp; 2596 struct sock *sk = (struct sock *)msk; 2597 2598 if (likely(!READ_ONCE(msk->rcv_fastclose))) 2599 return; 2600 2601 mptcp_token_destroy(msk); 2602 2603 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 2604 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); 2605 bool slow; 2606 2607 slow = lock_sock_fast(tcp_sk); 2608 if (tcp_sk->sk_state != TCP_CLOSE) { 2609 mptcp_send_active_reset_reason(tcp_sk); 2610 tcp_set_state(tcp_sk, TCP_CLOSE); 2611 } 2612 unlock_sock_fast(tcp_sk, slow); 2613 } 2614 2615 /* Mirror the tcp_reset() error propagation */ 2616 switch (sk->sk_state) { 2617 case TCP_SYN_SENT: 2618 WRITE_ONCE(sk->sk_err, ECONNREFUSED); 2619 break; 2620 case TCP_CLOSE_WAIT: 2621 WRITE_ONCE(sk->sk_err, EPIPE); 2622 break; 2623 case TCP_CLOSE: 2624 return; 2625 default: 2626 WRITE_ONCE(sk->sk_err, ECONNRESET); 2627 } 2628 2629 mptcp_set_state(sk, TCP_CLOSE); 2630 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 2631 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 2632 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); 2633 2634 /* the calling mptcp_worker will properly destroy the socket */ 2635 if (sock_flag(sk, SOCK_DEAD)) 2636 return; 2637 2638 sk->sk_state_change(sk); 2639 sk_error_report(sk); 2640 } 2641 2642 static void __mptcp_retrans(struct sock *sk) 2643 { 2644 struct mptcp_sock *msk = mptcp_sk(sk); 2645 struct mptcp_subflow_context *subflow; 2646 struct mptcp_sendmsg_info info = {}; 2647 struct mptcp_data_frag *dfrag; 2648 struct sock *ssk; 2649 int ret, err; 2650 u16 len = 0; 2651 2652 mptcp_clean_una_wakeup(sk); 2653 2654 /* first check ssk: need to kick "stale" logic */ 2655 err = mptcp_sched_get_retrans(msk); 2656 dfrag = mptcp_rtx_head(sk); 2657 if (!dfrag) { 2658 if (mptcp_data_fin_enabled(msk)) { 2659 struct inet_connection_sock *icsk = inet_csk(sk); 2660 2661 icsk->icsk_retransmits++; 2662 mptcp_set_datafin_timeout(sk); 2663 mptcp_send_ack(msk); 2664 2665 goto reset_timer; 2666 } 2667 2668 if (!mptcp_send_head(sk)) 2669 return; 2670 2671 goto reset_timer; 2672 } 2673 2674 if (err) 2675 goto reset_timer; 2676 2677 mptcp_for_each_subflow(msk, subflow) { 2678 if (READ_ONCE(subflow->scheduled)) { 2679 u16 copied = 0; 2680 2681 mptcp_subflow_set_scheduled(subflow, false); 2682 2683 ssk = mptcp_subflow_tcp_sock(subflow); 2684 2685 lock_sock(ssk); 2686 2687 /* limit retransmission to the bytes already sent on some subflows */ 2688 info.sent = 0; 2689 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : 2690 dfrag->already_sent; 2691 while (info.sent < info.limit) { 2692 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); 2693 if (ret <= 0) 2694 break; 2695 2696 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); 2697 copied += ret; 2698 info.sent += ret; 2699 } 2700 if (copied) { 2701 len = max(copied, len); 2702 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, 2703 info.size_goal); 2704 WRITE_ONCE(msk->allow_infinite_fallback, false); 2705 } 2706 2707 release_sock(ssk); 2708 } 2709 } 2710 2711 msk->bytes_retrans += len; 2712 dfrag->already_sent = max(dfrag->already_sent, len); 2713 2714 reset_timer: 2715 mptcp_check_and_set_pending(sk); 2716 2717 if (!mptcp_rtx_timer_pending(sk)) 2718 mptcp_reset_rtx_timer(sk); 2719 } 2720 2721 /* schedule the timeout timer for the relevant event: either close timeout 2722 * or mp_fail timeout. The close timeout takes precedence on the mp_fail one 2723 */ 2724 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) 2725 { 2726 struct sock *sk = (struct sock *)msk; 2727 unsigned long timeout, close_timeout; 2728 2729 if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) 2730 return; 2731 2732 close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp - 2733 tcp_jiffies32 + jiffies + mptcp_close_timeout(sk); 2734 2735 /* the close timeout takes precedence on the fail one, and here at least one of 2736 * them is active 2737 */ 2738 timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout; 2739 2740 sk_reset_timer(sk, &sk->sk_timer, timeout); 2741 } 2742 2743 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) 2744 { 2745 struct sock *ssk = msk->first; 2746 bool slow; 2747 2748 if (!ssk) 2749 return; 2750 2751 pr_debug("MP_FAIL doesn't respond, reset the subflow\n"); 2752 2753 slow = lock_sock_fast(ssk); 2754 mptcp_subflow_reset(ssk); 2755 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); 2756 unlock_sock_fast(ssk, slow); 2757 } 2758 2759 static void mptcp_do_fastclose(struct sock *sk) 2760 { 2761 struct mptcp_subflow_context *subflow, *tmp; 2762 struct mptcp_sock *msk = mptcp_sk(sk); 2763 2764 mptcp_set_state(sk, TCP_CLOSE); 2765 mptcp_for_each_subflow_safe(msk, subflow, tmp) 2766 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), 2767 subflow, MPTCP_CF_FASTCLOSE); 2768 } 2769 2770 static void mptcp_worker(struct work_struct *work) 2771 { 2772 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); 2773 struct sock *sk = (struct sock *)msk; 2774 unsigned long fail_tout; 2775 int state; 2776 2777 lock_sock(sk); 2778 state = sk->sk_state; 2779 if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN))) 2780 goto unlock; 2781 2782 mptcp_check_fastclose(msk); 2783 2784 mptcp_pm_nl_work(msk); 2785 2786 mptcp_check_send_data_fin(sk); 2787 mptcp_check_data_fin_ack(sk); 2788 mptcp_check_data_fin(sk); 2789 2790 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) 2791 __mptcp_close_subflow(sk); 2792 2793 if (mptcp_close_tout_expired(sk)) { 2794 mptcp_do_fastclose(sk); 2795 mptcp_close_wake_up(sk); 2796 } 2797 2798 if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) { 2799 __mptcp_destroy_sock(sk); 2800 goto unlock; 2801 } 2802 2803 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) 2804 __mptcp_retrans(sk); 2805 2806 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; 2807 if (fail_tout && time_after(jiffies, fail_tout)) 2808 mptcp_mp_fail_no_response(msk); 2809 2810 unlock: 2811 release_sock(sk); 2812 sock_put(sk); 2813 } 2814 2815 static void __mptcp_init_sock(struct sock *sk) 2816 { 2817 struct mptcp_sock *msk = mptcp_sk(sk); 2818 2819 INIT_LIST_HEAD(&msk->conn_list); 2820 INIT_LIST_HEAD(&msk->join_list); 2821 INIT_LIST_HEAD(&msk->rtx_queue); 2822 INIT_WORK(&msk->work, mptcp_worker); 2823 __skb_queue_head_init(&msk->receive_queue); 2824 msk->out_of_order_queue = RB_ROOT; 2825 msk->first_pending = NULL; 2826 WRITE_ONCE(msk->rmem_fwd_alloc, 0); 2827 WRITE_ONCE(msk->rmem_released, 0); 2828 msk->timer_ival = TCP_RTO_MIN; 2829 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; 2830 2831 WRITE_ONCE(msk->first, NULL); 2832 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; 2833 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); 2834 WRITE_ONCE(msk->allow_infinite_fallback, true); 2835 msk->recovery = false; 2836 msk->subflow_id = 1; 2837 msk->last_data_sent = tcp_jiffies32; 2838 msk->last_data_recv = tcp_jiffies32; 2839 msk->last_ack_recv = tcp_jiffies32; 2840 2841 mptcp_pm_data_init(msk); 2842 2843 /* re-use the csk retrans timer for MPTCP-level retrans */ 2844 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); 2845 timer_setup(&sk->sk_timer, mptcp_tout_timer, 0); 2846 } 2847 2848 static void mptcp_ca_reset(struct sock *sk) 2849 { 2850 struct inet_connection_sock *icsk = inet_csk(sk); 2851 2852 tcp_assign_congestion_control(sk); 2853 strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name, 2854 sizeof(mptcp_sk(sk)->ca_name)); 2855 2856 /* no need to keep a reference to the ops, the name will suffice */ 2857 tcp_cleanup_congestion_control(sk); 2858 icsk->icsk_ca_ops = NULL; 2859 } 2860 2861 static int mptcp_init_sock(struct sock *sk) 2862 { 2863 struct net *net = sock_net(sk); 2864 int ret; 2865 2866 __mptcp_init_sock(sk); 2867 2868 if (!mptcp_is_enabled(net)) 2869 return -ENOPROTOOPT; 2870 2871 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) 2872 return -ENOMEM; 2873 2874 rcu_read_lock(); 2875 ret = mptcp_init_sched(mptcp_sk(sk), 2876 mptcp_sched_find(mptcp_get_scheduler(net))); 2877 rcu_read_unlock(); 2878 if (ret) 2879 return ret; 2880 2881 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); 2882 2883 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will 2884 * propagate the correct value 2885 */ 2886 mptcp_ca_reset(sk); 2887 2888 sk_sockets_allocated_inc(sk); 2889 sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]); 2890 sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]); 2891 2892 return 0; 2893 } 2894 2895 static void __mptcp_clear_xmit(struct sock *sk) 2896 { 2897 struct mptcp_sock *msk = mptcp_sk(sk); 2898 struct mptcp_data_frag *dtmp, *dfrag; 2899 2900 WRITE_ONCE(msk->first_pending, NULL); 2901 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) 2902 dfrag_clear(sk, dfrag); 2903 } 2904 2905 void mptcp_cancel_work(struct sock *sk) 2906 { 2907 struct mptcp_sock *msk = mptcp_sk(sk); 2908 2909 if (cancel_work_sync(&msk->work)) 2910 __sock_put(sk); 2911 } 2912 2913 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) 2914 { 2915 lock_sock(ssk); 2916 2917 switch (ssk->sk_state) { 2918 case TCP_LISTEN: 2919 if (!(how & RCV_SHUTDOWN)) 2920 break; 2921 fallthrough; 2922 case TCP_SYN_SENT: 2923 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); 2924 break; 2925 default: 2926 if (__mptcp_check_fallback(mptcp_sk(sk))) { 2927 pr_debug("Fallback\n"); 2928 ssk->sk_shutdown |= how; 2929 tcp_shutdown(ssk, how); 2930 2931 /* simulate the data_fin ack reception to let the state 2932 * machine move forward 2933 */ 2934 WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt); 2935 mptcp_schedule_work(sk); 2936 } else { 2937 pr_debug("Sending DATA_FIN on subflow %p\n", ssk); 2938 tcp_send_ack(ssk); 2939 if (!mptcp_rtx_timer_pending(sk)) 2940 mptcp_reset_rtx_timer(sk); 2941 } 2942 break; 2943 } 2944 2945 release_sock(ssk); 2946 } 2947 2948 void mptcp_set_state(struct sock *sk, int state) 2949 { 2950 int oldstate = sk->sk_state; 2951 2952 switch (state) { 2953 case TCP_ESTABLISHED: 2954 if (oldstate != TCP_ESTABLISHED) 2955 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); 2956 break; 2957 case TCP_CLOSE_WAIT: 2958 /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state: 2959 * MPTCP "accepted" sockets will be created later on. So no 2960 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT. 2961 */ 2962 break; 2963 default: 2964 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) 2965 MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); 2966 } 2967 2968 inet_sk_state_store(sk, state); 2969 } 2970 2971 static const unsigned char new_state[16] = { 2972 /* current state: new state: action: */ 2973 [0 /* (Invalid) */] = TCP_CLOSE, 2974 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2975 [TCP_SYN_SENT] = TCP_CLOSE, 2976 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2977 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 2978 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 2979 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ 2980 [TCP_CLOSE] = TCP_CLOSE, 2981 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 2982 [TCP_LAST_ACK] = TCP_LAST_ACK, 2983 [TCP_LISTEN] = TCP_CLOSE, 2984 [TCP_CLOSING] = TCP_CLOSING, 2985 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 2986 }; 2987 2988 static int mptcp_close_state(struct sock *sk) 2989 { 2990 int next = (int)new_state[sk->sk_state]; 2991 int ns = next & TCP_STATE_MASK; 2992 2993 mptcp_set_state(sk, ns); 2994 2995 return next & TCP_ACTION_FIN; 2996 } 2997 2998 static void mptcp_check_send_data_fin(struct sock *sk) 2999 { 3000 struct mptcp_subflow_context *subflow; 3001 struct mptcp_sock *msk = mptcp_sk(sk); 3002 3003 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n", 3004 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), 3005 msk->snd_nxt, msk->write_seq); 3006 3007 /* we still need to enqueue subflows or not really shutting down, 3008 * skip this 3009 */ 3010 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || 3011 mptcp_send_head(sk)) 3012 return; 3013 3014 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 3015 3016 mptcp_for_each_subflow(msk, subflow) { 3017 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); 3018 3019 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN); 3020 } 3021 } 3022 3023 static void __mptcp_wr_shutdown(struct sock *sk) 3024 { 3025 struct mptcp_sock *msk = mptcp_sk(sk); 3026 3027 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n", 3028 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, 3029 !!mptcp_send_head(sk)); 3030 3031 /* will be ignored by fallback sockets */ 3032 WRITE_ONCE(msk->write_seq, msk->write_seq + 1); 3033 WRITE_ONCE(msk->snd_data_fin_enable, 1); 3034 3035 mptcp_check_send_data_fin(sk); 3036 } 3037 3038 static void __mptcp_destroy_sock(struct sock *sk) 3039 { 3040 struct mptcp_sock *msk = mptcp_sk(sk); 3041 3042 pr_debug("msk=%p\n", msk); 3043 3044 might_sleep(); 3045 3046 mptcp_stop_rtx_timer(sk); 3047 sk_stop_timer(sk, &sk->sk_timer); 3048 msk->pm.status = 0; 3049 mptcp_release_sched(msk); 3050 3051 sk->sk_prot->destroy(sk); 3052 3053 WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc)); 3054 WARN_ON_ONCE(msk->rmem_released); 3055 sk_stream_kill_queues(sk); 3056 xfrm_sk_free_policy(sk); 3057 3058 sock_put(sk); 3059 } 3060 3061 void __mptcp_unaccepted_force_close(struct sock *sk) 3062 { 3063 sock_set_flag(sk, SOCK_DEAD); 3064 mptcp_do_fastclose(sk); 3065 __mptcp_destroy_sock(sk); 3066 } 3067 3068 static __poll_t mptcp_check_readable(struct sock *sk) 3069 { 3070 return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0; 3071 } 3072 3073 static void mptcp_check_listen_stop(struct sock *sk) 3074 { 3075 struct sock *ssk; 3076 3077 if (inet_sk_state_load(sk) != TCP_LISTEN) 3078 return; 3079 3080 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3081 ssk = mptcp_sk(sk)->first; 3082 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN)) 3083 return; 3084 3085 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); 3086 tcp_set_state(ssk, TCP_CLOSE); 3087 mptcp_subflow_queue_clean(sk, ssk); 3088 inet_csk_listen_stop(ssk); 3089 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); 3090 release_sock(ssk); 3091 } 3092 3093 bool __mptcp_close(struct sock *sk, long timeout) 3094 { 3095 struct mptcp_subflow_context *subflow; 3096 struct mptcp_sock *msk = mptcp_sk(sk); 3097 bool do_cancel_work = false; 3098 int subflows_alive = 0; 3099 3100 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 3101 3102 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { 3103 mptcp_check_listen_stop(sk); 3104 mptcp_set_state(sk, TCP_CLOSE); 3105 goto cleanup; 3106 } 3107 3108 if (mptcp_data_avail(msk) || timeout < 0) { 3109 /* If the msk has read data, or the caller explicitly ask it, 3110 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose 3111 */ 3112 mptcp_do_fastclose(sk); 3113 timeout = 0; 3114 } else if (mptcp_close_state(sk)) { 3115 __mptcp_wr_shutdown(sk); 3116 } 3117 3118 sk_stream_wait_close(sk, timeout); 3119 3120 cleanup: 3121 /* orphan all the subflows */ 3122 mptcp_for_each_subflow(msk, subflow) { 3123 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 3124 bool slow = lock_sock_fast_nested(ssk); 3125 3126 subflows_alive += ssk->sk_state != TCP_CLOSE; 3127 3128 /* since the close timeout takes precedence on the fail one, 3129 * cancel the latter 3130 */ 3131 if (ssk == msk->first) 3132 subflow->fail_tout = 0; 3133 3134 /* detach from the parent socket, but allow data_ready to 3135 * push incoming data into the mptcp stack, to properly ack it 3136 */ 3137 ssk->sk_socket = NULL; 3138 ssk->sk_wq = NULL; 3139 unlock_sock_fast(ssk, slow); 3140 } 3141 sock_orphan(sk); 3142 3143 /* all the subflows are closed, only timeout can change the msk 3144 * state, let's not keep resources busy for no reasons 3145 */ 3146 if (subflows_alive == 0) 3147 mptcp_set_state(sk, TCP_CLOSE); 3148 3149 sock_hold(sk); 3150 pr_debug("msk=%p state=%d\n", sk, sk->sk_state); 3151 mptcp_pm_connection_closed(msk); 3152 3153 if (sk->sk_state == TCP_CLOSE) { 3154 __mptcp_destroy_sock(sk); 3155 do_cancel_work = true; 3156 } else { 3157 mptcp_start_tout_timer(sk); 3158 } 3159 3160 return do_cancel_work; 3161 } 3162 3163 static void mptcp_close(struct sock *sk, long timeout) 3164 { 3165 bool do_cancel_work; 3166 3167 lock_sock(sk); 3168 3169 do_cancel_work = __mptcp_close(sk, timeout); 3170 release_sock(sk); 3171 if (do_cancel_work) 3172 mptcp_cancel_work(sk); 3173 3174 sock_put(sk); 3175 } 3176 3177 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) 3178 { 3179 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3180 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); 3181 struct ipv6_pinfo *msk6 = inet6_sk(msk); 3182 3183 msk->sk_v6_daddr = ssk->sk_v6_daddr; 3184 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; 3185 3186 if (msk6 && ssk6) { 3187 msk6->saddr = ssk6->saddr; 3188 msk6->flow_label = ssk6->flow_label; 3189 } 3190 #endif 3191 3192 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; 3193 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; 3194 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; 3195 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; 3196 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; 3197 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; 3198 } 3199 3200 static int mptcp_disconnect(struct sock *sk, int flags) 3201 { 3202 struct mptcp_sock *msk = mptcp_sk(sk); 3203 3204 /* We are on the fastopen error path. We can't call straight into the 3205 * subflows cleanup code due to lock nesting (we are already under 3206 * msk->firstsocket lock). 3207 */ 3208 if (msk->fastopening) 3209 return -EBUSY; 3210 3211 mptcp_check_listen_stop(sk); 3212 mptcp_set_state(sk, TCP_CLOSE); 3213 3214 mptcp_stop_rtx_timer(sk); 3215 mptcp_stop_tout_timer(sk); 3216 3217 mptcp_pm_connection_closed(msk); 3218 3219 /* msk->subflow is still intact, the following will not free the first 3220 * subflow 3221 */ 3222 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); 3223 WRITE_ONCE(msk->flags, 0); 3224 msk->cb_flags = 0; 3225 msk->recovery = false; 3226 WRITE_ONCE(msk->can_ack, false); 3227 WRITE_ONCE(msk->fully_established, false); 3228 WRITE_ONCE(msk->rcv_data_fin, false); 3229 WRITE_ONCE(msk->snd_data_fin_enable, false); 3230 WRITE_ONCE(msk->rcv_fastclose, false); 3231 WRITE_ONCE(msk->use_64bit_ack, false); 3232 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); 3233 mptcp_pm_data_reset(msk); 3234 mptcp_ca_reset(sk); 3235 msk->bytes_consumed = 0; 3236 msk->bytes_acked = 0; 3237 msk->bytes_received = 0; 3238 msk->bytes_sent = 0; 3239 msk->bytes_retrans = 0; 3240 msk->rcvspace_init = 0; 3241 3242 WRITE_ONCE(sk->sk_shutdown, 0); 3243 sk_error_report(sk); 3244 return 0; 3245 } 3246 3247 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3248 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) 3249 { 3250 unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); 3251 3252 return (struct ipv6_pinfo *)(((u8 *)sk) + offset); 3253 } 3254 3255 static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk) 3256 { 3257 const struct ipv6_pinfo *np = inet6_sk(sk); 3258 struct ipv6_txoptions *opt; 3259 struct ipv6_pinfo *newnp; 3260 3261 newnp = inet6_sk(newsk); 3262 3263 rcu_read_lock(); 3264 opt = rcu_dereference(np->opt); 3265 if (opt) { 3266 opt = ipv6_dup_options(newsk, opt); 3267 if (!opt) 3268 net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__); 3269 } 3270 RCU_INIT_POINTER(newnp->opt, opt); 3271 rcu_read_unlock(); 3272 } 3273 #endif 3274 3275 static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk) 3276 { 3277 struct ip_options_rcu *inet_opt, *newopt = NULL; 3278 const struct inet_sock *inet = inet_sk(sk); 3279 struct inet_sock *newinet; 3280 3281 newinet = inet_sk(newsk); 3282 3283 rcu_read_lock(); 3284 inet_opt = rcu_dereference(inet->inet_opt); 3285 if (inet_opt) { 3286 newopt = sock_kmalloc(newsk, sizeof(*inet_opt) + 3287 inet_opt->opt.optlen, GFP_ATOMIC); 3288 if (newopt) 3289 memcpy(newopt, inet_opt, sizeof(*inet_opt) + 3290 inet_opt->opt.optlen); 3291 else 3292 net_warn_ratelimited("%s: Failed to copy ip options\n", __func__); 3293 } 3294 RCU_INIT_POINTER(newinet->inet_opt, newopt); 3295 rcu_read_unlock(); 3296 } 3297 3298 struct sock *mptcp_sk_clone_init(const struct sock *sk, 3299 const struct mptcp_options_received *mp_opt, 3300 struct sock *ssk, 3301 struct request_sock *req) 3302 { 3303 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 3304 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); 3305 struct mptcp_subflow_context *subflow; 3306 struct mptcp_sock *msk; 3307 3308 if (!nsk) 3309 return NULL; 3310 3311 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3312 if (nsk->sk_family == AF_INET6) 3313 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); 3314 #endif 3315 3316 __mptcp_init_sock(nsk); 3317 3318 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3319 if (nsk->sk_family == AF_INET6) 3320 mptcp_copy_ip6_options(nsk, sk); 3321 else 3322 #endif 3323 mptcp_copy_ip_options(nsk, sk); 3324 3325 msk = mptcp_sk(nsk); 3326 WRITE_ONCE(msk->local_key, subflow_req->local_key); 3327 WRITE_ONCE(msk->token, subflow_req->token); 3328 msk->in_accept_queue = 1; 3329 WRITE_ONCE(msk->fully_established, false); 3330 if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) 3331 WRITE_ONCE(msk->csum_enabled, true); 3332 3333 WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1); 3334 WRITE_ONCE(msk->snd_nxt, msk->write_seq); 3335 WRITE_ONCE(msk->snd_una, msk->write_seq); 3336 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); 3337 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; 3338 mptcp_init_sched(msk, mptcp_sk(sk)->sched); 3339 3340 /* passive msk is created after the first/MPC subflow */ 3341 msk->subflow_id = 2; 3342 3343 sock_reset_flag(nsk, SOCK_RCU_FREE); 3344 security_inet_csk_clone(nsk, req); 3345 3346 /* this can't race with mptcp_close(), as the msk is 3347 * not yet exposted to user-space 3348 */ 3349 mptcp_set_state(nsk, TCP_ESTABLISHED); 3350 3351 /* The msk maintain a ref to each subflow in the connections list */ 3352 WRITE_ONCE(msk->first, ssk); 3353 subflow = mptcp_subflow_ctx(ssk); 3354 list_add(&subflow->node, &msk->conn_list); 3355 sock_hold(ssk); 3356 3357 /* new mpc subflow takes ownership of the newly 3358 * created mptcp socket 3359 */ 3360 mptcp_token_accept(subflow_req, msk); 3361 3362 /* set msk addresses early to ensure mptcp_pm_get_local_id() 3363 * uses the correct data 3364 */ 3365 mptcp_copy_inaddrs(nsk, ssk); 3366 __mptcp_propagate_sndbuf(nsk, ssk); 3367 3368 mptcp_rcv_space_init(msk, ssk); 3369 3370 if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK) 3371 __mptcp_subflow_fully_established(msk, subflow, mp_opt); 3372 bh_unlock_sock(nsk); 3373 3374 /* note: the newly allocated socket refcount is 2 now */ 3375 return nsk; 3376 } 3377 3378 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) 3379 { 3380 const struct tcp_sock *tp = tcp_sk(ssk); 3381 3382 msk->rcvspace_init = 1; 3383 msk->rcvq_space.copied = 0; 3384 msk->rcvq_space.rtt_us = 0; 3385 3386 msk->rcvq_space.time = tp->tcp_mstamp; 3387 3388 /* initial rcv_space offering made to peer */ 3389 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, 3390 TCP_INIT_CWND * tp->advmss); 3391 if (msk->rcvq_space.space == 0) 3392 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; 3393 } 3394 3395 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) 3396 { 3397 struct mptcp_subflow_context *subflow, *tmp; 3398 struct sock *sk = (struct sock *)msk; 3399 3400 __mptcp_clear_xmit(sk); 3401 3402 /* join list will be eventually flushed (with rst) at sock lock release time */ 3403 mptcp_for_each_subflow_safe(msk, subflow, tmp) 3404 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); 3405 3406 /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ 3407 mptcp_data_lock(sk); 3408 skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); 3409 __skb_queue_purge(&sk->sk_receive_queue); 3410 skb_rbtree_purge(&msk->out_of_order_queue); 3411 mptcp_data_unlock(sk); 3412 3413 /* move all the rx fwd alloc into the sk_mem_reclaim_final in 3414 * inet_sock_destruct() will dispose it 3415 */ 3416 sk_forward_alloc_add(sk, msk->rmem_fwd_alloc); 3417 WRITE_ONCE(msk->rmem_fwd_alloc, 0); 3418 mptcp_token_destroy(msk); 3419 mptcp_pm_free_anno_list(msk); 3420 mptcp_free_local_addr_list(msk); 3421 } 3422 3423 static void mptcp_destroy(struct sock *sk) 3424 { 3425 struct mptcp_sock *msk = mptcp_sk(sk); 3426 3427 /* allow the following to close even the initial subflow */ 3428 msk->free_first = 1; 3429 mptcp_destroy_common(msk, 0); 3430 sk_sockets_allocated_dec(sk); 3431 } 3432 3433 void __mptcp_data_acked(struct sock *sk) 3434 { 3435 if (!sock_owned_by_user(sk)) 3436 __mptcp_clean_una(sk); 3437 else 3438 __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags); 3439 } 3440 3441 void __mptcp_check_push(struct sock *sk, struct sock *ssk) 3442 { 3443 if (!mptcp_send_head(sk)) 3444 return; 3445 3446 if (!sock_owned_by_user(sk)) 3447 __mptcp_subflow_push_pending(sk, ssk, false); 3448 else 3449 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); 3450 } 3451 3452 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \ 3453 BIT(MPTCP_RETRANSMIT) | \ 3454 BIT(MPTCP_FLUSH_JOIN_LIST)) 3455 3456 /* processes deferred events and flush wmem */ 3457 static void mptcp_release_cb(struct sock *sk) 3458 __must_hold(&sk->sk_lock.slock) 3459 { 3460 struct mptcp_sock *msk = mptcp_sk(sk); 3461 3462 for (;;) { 3463 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); 3464 struct list_head join_list; 3465 3466 if (!flags) 3467 break; 3468 3469 INIT_LIST_HEAD(&join_list); 3470 list_splice_init(&msk->join_list, &join_list); 3471 3472 /* the following actions acquire the subflow socket lock 3473 * 3474 * 1) can't be invoked in atomic scope 3475 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX 3476 * datapath acquires the msk socket spinlock while helding 3477 * the subflow socket lock 3478 */ 3479 msk->cb_flags &= ~flags; 3480 spin_unlock_bh(&sk->sk_lock.slock); 3481 3482 if (flags & BIT(MPTCP_FLUSH_JOIN_LIST)) 3483 __mptcp_flush_join_list(sk, &join_list); 3484 if (flags & BIT(MPTCP_PUSH_PENDING)) 3485 __mptcp_push_pending(sk, 0); 3486 if (flags & BIT(MPTCP_RETRANSMIT)) 3487 __mptcp_retrans(sk); 3488 3489 cond_resched(); 3490 spin_lock_bh(&sk->sk_lock.slock); 3491 } 3492 3493 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) 3494 __mptcp_clean_una_wakeup(sk); 3495 if (unlikely(msk->cb_flags)) { 3496 /* be sure to sync the msk state before taking actions 3497 * depending on sk_state (MPTCP_ERROR_REPORT) 3498 * On sk release avoid actions depending on the first subflow 3499 */ 3500 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) 3501 __mptcp_sync_state(sk, msk->pending_state); 3502 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) 3503 __mptcp_error_report(sk); 3504 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) 3505 __mptcp_sync_sndbuf(sk); 3506 } 3507 3508 __mptcp_update_rmem(sk); 3509 } 3510 3511 /* MP_JOIN client subflow must wait for 4th ack before sending any data: 3512 * TCP can't schedule delack timer before the subflow is fully established. 3513 * MPTCP uses the delack timer to do 3rd ack retransmissions 3514 */ 3515 static void schedule_3rdack_retransmission(struct sock *ssk) 3516 { 3517 struct inet_connection_sock *icsk = inet_csk(ssk); 3518 struct tcp_sock *tp = tcp_sk(ssk); 3519 unsigned long timeout; 3520 3521 if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established)) 3522 return; 3523 3524 /* reschedule with a timeout above RTT, as we must look only for drop */ 3525 if (tp->srtt_us) 3526 timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1)); 3527 else 3528 timeout = TCP_TIMEOUT_INIT; 3529 timeout += jiffies; 3530 3531 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); 3532 smp_store_release(&icsk->icsk_ack.pending, 3533 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); 3534 icsk->icsk_ack.timeout = timeout; 3535 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); 3536 } 3537 3538 void mptcp_subflow_process_delegated(struct sock *ssk, long status) 3539 { 3540 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 3541 struct sock *sk = subflow->conn; 3542 3543 if (status & BIT(MPTCP_DELEGATE_SEND)) { 3544 mptcp_data_lock(sk); 3545 if (!sock_owned_by_user(sk)) 3546 __mptcp_subflow_push_pending(sk, ssk, true); 3547 else 3548 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); 3549 mptcp_data_unlock(sk); 3550 } 3551 if (status & BIT(MPTCP_DELEGATE_SNDBUF)) { 3552 mptcp_data_lock(sk); 3553 if (!sock_owned_by_user(sk)) 3554 __mptcp_sync_sndbuf(sk); 3555 else 3556 __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags); 3557 mptcp_data_unlock(sk); 3558 } 3559 if (status & BIT(MPTCP_DELEGATE_ACK)) 3560 schedule_3rdack_retransmission(ssk); 3561 } 3562 3563 static int mptcp_hash(struct sock *sk) 3564 { 3565 /* should never be called, 3566 * we hash the TCP subflows not the MPTCP socket 3567 */ 3568 WARN_ON_ONCE(1); 3569 return 0; 3570 } 3571 3572 static void mptcp_unhash(struct sock *sk) 3573 { 3574 /* called from sk_common_release(), but nothing to do here */ 3575 } 3576 3577 static int mptcp_get_port(struct sock *sk, unsigned short snum) 3578 { 3579 struct mptcp_sock *msk = mptcp_sk(sk); 3580 3581 pr_debug("msk=%p, ssk=%p\n", msk, msk->first); 3582 if (WARN_ON_ONCE(!msk->first)) 3583 return -EINVAL; 3584 3585 return inet_csk_get_port(msk->first, snum); 3586 } 3587 3588 void mptcp_finish_connect(struct sock *ssk) 3589 { 3590 struct mptcp_subflow_context *subflow; 3591 struct mptcp_sock *msk; 3592 struct sock *sk; 3593 3594 subflow = mptcp_subflow_ctx(ssk); 3595 sk = subflow->conn; 3596 msk = mptcp_sk(sk); 3597 3598 pr_debug("msk=%p, token=%u\n", sk, subflow->token); 3599 3600 subflow->map_seq = subflow->iasn; 3601 subflow->map_subflow_seq = 1; 3602 3603 /* the socket is not connected yet, no msk/subflow ops can access/race 3604 * accessing the field below 3605 */ 3606 WRITE_ONCE(msk->local_key, subflow->local_key); 3607 3608 mptcp_pm_new_connection(msk, ssk, 0); 3609 } 3610 3611 void mptcp_sock_graft(struct sock *sk, struct socket *parent) 3612 { 3613 write_lock_bh(&sk->sk_callback_lock); 3614 rcu_assign_pointer(sk->sk_wq, &parent->wq); 3615 sk_set_socket(sk, parent); 3616 sk->sk_uid = SOCK_INODE(parent)->i_uid; 3617 write_unlock_bh(&sk->sk_callback_lock); 3618 } 3619 3620 bool mptcp_finish_join(struct sock *ssk) 3621 { 3622 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 3623 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 3624 struct sock *parent = (void *)msk; 3625 bool ret = true; 3626 3627 pr_debug("msk=%p, subflow=%p\n", msk, subflow); 3628 3629 /* mptcp socket already closing? */ 3630 if (!mptcp_is_fully_established(parent)) { 3631 subflow->reset_reason = MPTCP_RST_EMPTCP; 3632 return false; 3633 } 3634 3635 /* active subflow, already present inside the conn_list */ 3636 if (!list_empty(&subflow->node)) { 3637 mptcp_subflow_joined(msk, ssk); 3638 mptcp_propagate_sndbuf(parent, ssk); 3639 return true; 3640 } 3641 3642 if (!mptcp_pm_allow_new_subflow(msk)) 3643 goto err_prohibited; 3644 3645 /* If we can't acquire msk socket lock here, let the release callback 3646 * handle it 3647 */ 3648 mptcp_data_lock(parent); 3649 if (!sock_owned_by_user(parent)) { 3650 ret = __mptcp_finish_join(msk, ssk); 3651 if (ret) { 3652 sock_hold(ssk); 3653 list_add_tail(&subflow->node, &msk->conn_list); 3654 } 3655 } else { 3656 sock_hold(ssk); 3657 list_add_tail(&subflow->node, &msk->join_list); 3658 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); 3659 } 3660 mptcp_data_unlock(parent); 3661 3662 if (!ret) { 3663 err_prohibited: 3664 subflow->reset_reason = MPTCP_RST_EPROHIBIT; 3665 return false; 3666 } 3667 3668 return true; 3669 } 3670 3671 static void mptcp_shutdown(struct sock *sk, int how) 3672 { 3673 pr_debug("sk=%p, how=%d\n", sk, how); 3674 3675 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) 3676 __mptcp_wr_shutdown(sk); 3677 } 3678 3679 static int mptcp_forward_alloc_get(const struct sock *sk) 3680 { 3681 return READ_ONCE(sk->sk_forward_alloc) + 3682 READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc); 3683 } 3684 3685 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) 3686 { 3687 const struct sock *sk = (void *)msk; 3688 u64 delta; 3689 3690 if (sk->sk_state == TCP_LISTEN) 3691 return -EINVAL; 3692 3693 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 3694 return 0; 3695 3696 delta = msk->write_seq - v; 3697 if (__mptcp_check_fallback(msk) && msk->first) { 3698 struct tcp_sock *tp = tcp_sk(msk->first); 3699 3700 /* the first subflow is disconnected after close - see 3701 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq 3702 * so ignore that status, too. 3703 */ 3704 if (!((1 << msk->first->sk_state) & 3705 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))) 3706 delta += READ_ONCE(tp->write_seq) - tp->snd_una; 3707 } 3708 if (delta > INT_MAX) 3709 delta = INT_MAX; 3710 3711 return (int)delta; 3712 } 3713 3714 static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) 3715 { 3716 struct mptcp_sock *msk = mptcp_sk(sk); 3717 bool slow; 3718 3719 switch (cmd) { 3720 case SIOCINQ: 3721 if (sk->sk_state == TCP_LISTEN) 3722 return -EINVAL; 3723 3724 lock_sock(sk); 3725 __mptcp_move_skbs(msk); 3726 *karg = mptcp_inq_hint(sk); 3727 release_sock(sk); 3728 break; 3729 case SIOCOUTQ: 3730 slow = lock_sock_fast(sk); 3731 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); 3732 unlock_sock_fast(sk, slow); 3733 break; 3734 case SIOCOUTQNSD: 3735 slow = lock_sock_fast(sk); 3736 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); 3737 unlock_sock_fast(sk, slow); 3738 break; 3739 default: 3740 return -ENOIOCTLCMD; 3741 } 3742 3743 return 0; 3744 } 3745 3746 static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 3747 { 3748 struct mptcp_subflow_context *subflow; 3749 struct mptcp_sock *msk = mptcp_sk(sk); 3750 int err = -EINVAL; 3751 struct sock *ssk; 3752 3753 ssk = __mptcp_nmpc_sk(msk); 3754 if (IS_ERR(ssk)) 3755 return PTR_ERR(ssk); 3756 3757 mptcp_set_state(sk, TCP_SYN_SENT); 3758 subflow = mptcp_subflow_ctx(ssk); 3759 #ifdef CONFIG_TCP_MD5SIG 3760 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of 3761 * TCP option space. 3762 */ 3763 if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info)) 3764 mptcp_subflow_early_fallback(msk, subflow); 3765 #endif 3766 if (subflow->request_mptcp) { 3767 if (mptcp_active_should_disable(sk)) { 3768 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED); 3769 mptcp_subflow_early_fallback(msk, subflow); 3770 } else if (mptcp_token_new_connect(ssk) < 0) { 3771 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT); 3772 mptcp_subflow_early_fallback(msk, subflow); 3773 } 3774 } 3775 3776 WRITE_ONCE(msk->write_seq, subflow->idsn); 3777 WRITE_ONCE(msk->snd_nxt, subflow->idsn); 3778 WRITE_ONCE(msk->snd_una, subflow->idsn); 3779 if (likely(!__mptcp_check_fallback(msk))) 3780 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE); 3781 3782 /* if reaching here via the fastopen/sendmsg path, the caller already 3783 * acquired the subflow socket lock, too. 3784 */ 3785 if (!msk->fastopening) 3786 lock_sock(ssk); 3787 3788 /* the following mirrors closely a very small chunk of code from 3789 * __inet_stream_connect() 3790 */ 3791 if (ssk->sk_state != TCP_CLOSE) 3792 goto out; 3793 3794 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) { 3795 err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len); 3796 if (err) 3797 goto out; 3798 } 3799 3800 err = ssk->sk_prot->connect(ssk, uaddr, addr_len); 3801 if (err < 0) 3802 goto out; 3803 3804 inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk)); 3805 3806 out: 3807 if (!msk->fastopening) 3808 release_sock(ssk); 3809 3810 /* on successful connect, the msk state will be moved to established by 3811 * subflow_finish_connect() 3812 */ 3813 if (unlikely(err)) { 3814 /* avoid leaving a dangling token in an unconnected socket */ 3815 mptcp_token_destroy(msk); 3816 mptcp_set_state(sk, TCP_CLOSE); 3817 return err; 3818 } 3819 3820 mptcp_copy_inaddrs(sk, ssk); 3821 return 0; 3822 } 3823 3824 static struct proto mptcp_prot = { 3825 .name = "MPTCP", 3826 .owner = THIS_MODULE, 3827 .init = mptcp_init_sock, 3828 .connect = mptcp_connect, 3829 .disconnect = mptcp_disconnect, 3830 .close = mptcp_close, 3831 .setsockopt = mptcp_setsockopt, 3832 .getsockopt = mptcp_getsockopt, 3833 .shutdown = mptcp_shutdown, 3834 .destroy = mptcp_destroy, 3835 .sendmsg = mptcp_sendmsg, 3836 .ioctl = mptcp_ioctl, 3837 .recvmsg = mptcp_recvmsg, 3838 .release_cb = mptcp_release_cb, 3839 .hash = mptcp_hash, 3840 .unhash = mptcp_unhash, 3841 .get_port = mptcp_get_port, 3842 .forward_alloc_get = mptcp_forward_alloc_get, 3843 .stream_memory_free = mptcp_stream_memory_free, 3844 .sockets_allocated = &mptcp_sockets_allocated, 3845 3846 .memory_allocated = &tcp_memory_allocated, 3847 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, 3848 3849 .memory_pressure = &tcp_memory_pressure, 3850 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), 3851 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), 3852 .sysctl_mem = sysctl_tcp_mem, 3853 .obj_size = sizeof(struct mptcp_sock), 3854 .slab_flags = SLAB_TYPESAFE_BY_RCU, 3855 .no_autobind = true, 3856 }; 3857 3858 static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3859 { 3860 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3861 struct sock *ssk, *sk = sock->sk; 3862 int err = -EINVAL; 3863 3864 lock_sock(sk); 3865 ssk = __mptcp_nmpc_sk(msk); 3866 if (IS_ERR(ssk)) { 3867 err = PTR_ERR(ssk); 3868 goto unlock; 3869 } 3870 3871 if (sk->sk_family == AF_INET) 3872 err = inet_bind_sk(ssk, uaddr, addr_len); 3873 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 3874 else if (sk->sk_family == AF_INET6) 3875 err = inet6_bind_sk(ssk, uaddr, addr_len); 3876 #endif 3877 if (!err) 3878 mptcp_copy_inaddrs(sk, ssk); 3879 3880 unlock: 3881 release_sock(sk); 3882 return err; 3883 } 3884 3885 static int mptcp_listen(struct socket *sock, int backlog) 3886 { 3887 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3888 struct sock *sk = sock->sk; 3889 struct sock *ssk; 3890 int err; 3891 3892 pr_debug("msk=%p\n", msk); 3893 3894 lock_sock(sk); 3895 3896 err = -EINVAL; 3897 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) 3898 goto unlock; 3899 3900 ssk = __mptcp_nmpc_sk(msk); 3901 if (IS_ERR(ssk)) { 3902 err = PTR_ERR(ssk); 3903 goto unlock; 3904 } 3905 3906 mptcp_set_state(sk, TCP_LISTEN); 3907 sock_set_flag(sk, SOCK_RCU_FREE); 3908 3909 lock_sock(ssk); 3910 err = __inet_listen_sk(ssk, backlog); 3911 release_sock(ssk); 3912 mptcp_set_state(sk, inet_sk_state_load(ssk)); 3913 3914 if (!err) { 3915 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3916 mptcp_copy_inaddrs(sk, ssk); 3917 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); 3918 } 3919 3920 unlock: 3921 release_sock(sk); 3922 return err; 3923 } 3924 3925 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, 3926 struct proto_accept_arg *arg) 3927 { 3928 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3929 struct sock *ssk, *newsk; 3930 3931 pr_debug("msk=%p\n", msk); 3932 3933 /* Buggy applications can call accept on socket states other then LISTEN 3934 * but no need to allocate the first subflow just to error out. 3935 */ 3936 ssk = READ_ONCE(msk->first); 3937 if (!ssk) 3938 return -EINVAL; 3939 3940 pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk)); 3941 newsk = inet_csk_accept(ssk, arg); 3942 if (!newsk) 3943 return arg->err; 3944 3945 pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk)); 3946 if (sk_is_mptcp(newsk)) { 3947 struct mptcp_subflow_context *subflow; 3948 struct sock *new_mptcp_sock; 3949 3950 subflow = mptcp_subflow_ctx(newsk); 3951 new_mptcp_sock = subflow->conn; 3952 3953 /* is_mptcp should be false if subflow->conn is missing, see 3954 * subflow_syn_recv_sock() 3955 */ 3956 if (WARN_ON_ONCE(!new_mptcp_sock)) { 3957 tcp_sk(newsk)->is_mptcp = 0; 3958 goto tcpfallback; 3959 } 3960 3961 newsk = new_mptcp_sock; 3962 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK); 3963 3964 newsk->sk_kern_sock = arg->kern; 3965 lock_sock(newsk); 3966 __inet_accept(sock, newsock, newsk); 3967 3968 set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags); 3969 msk = mptcp_sk(newsk); 3970 msk->in_accept_queue = 0; 3971 3972 /* set ssk->sk_socket of accept()ed flows to mptcp socket. 3973 * This is needed so NOSPACE flag can be set from tcp stack. 3974 */ 3975 mptcp_for_each_subflow(msk, subflow) { 3976 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 3977 3978 if (!ssk->sk_socket) 3979 mptcp_sock_graft(ssk, newsock); 3980 } 3981 3982 /* Do late cleanup for the first subflow as necessary. Also 3983 * deal with bad peers not doing a complete shutdown. 3984 */ 3985 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { 3986 __mptcp_close_ssk(newsk, msk->first, 3987 mptcp_subflow_ctx(msk->first), 0); 3988 if (unlikely(list_is_singular(&msk->conn_list))) 3989 mptcp_set_state(newsk, TCP_CLOSE); 3990 } 3991 } else { 3992 tcpfallback: 3993 newsk->sk_kern_sock = arg->kern; 3994 lock_sock(newsk); 3995 __inet_accept(sock, newsock, newsk); 3996 /* we are being invoked after accepting a non-mp-capable 3997 * flow: sk is a tcp_sk, not an mptcp one. 3998 * 3999 * Hand the socket over to tcp so all further socket ops 4000 * bypass mptcp. 4001 */ 4002 WRITE_ONCE(newsock->sk->sk_socket->ops, 4003 mptcp_fallback_tcp_ops(newsock->sk)); 4004 } 4005 release_sock(newsk); 4006 4007 return 0; 4008 } 4009 4010 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) 4011 { 4012 struct sock *sk = (struct sock *)msk; 4013 4014 if (__mptcp_stream_is_writeable(sk, 1)) 4015 return EPOLLOUT | EPOLLWRNORM; 4016 4017 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 4018 smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */ 4019 if (__mptcp_stream_is_writeable(sk, 1)) 4020 return EPOLLOUT | EPOLLWRNORM; 4021 4022 return 0; 4023 } 4024 4025 static __poll_t mptcp_poll(struct file *file, struct socket *sock, 4026 struct poll_table_struct *wait) 4027 { 4028 struct sock *sk = sock->sk; 4029 struct mptcp_sock *msk; 4030 __poll_t mask = 0; 4031 u8 shutdown; 4032 int state; 4033 4034 msk = mptcp_sk(sk); 4035 sock_poll_wait(file, sock, wait); 4036 4037 state = inet_sk_state_load(sk); 4038 pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); 4039 if (state == TCP_LISTEN) { 4040 struct sock *ssk = READ_ONCE(msk->first); 4041 4042 if (WARN_ON_ONCE(!ssk)) 4043 return 0; 4044 4045 return inet_csk_listen_poll(ssk); 4046 } 4047 4048 shutdown = READ_ONCE(sk->sk_shutdown); 4049 if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) 4050 mask |= EPOLLHUP; 4051 if (shutdown & RCV_SHUTDOWN) 4052 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 4053 4054 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { 4055 mask |= mptcp_check_readable(sk); 4056 if (shutdown & SEND_SHUTDOWN) 4057 mask |= EPOLLOUT | EPOLLWRNORM; 4058 else 4059 mask |= mptcp_check_writeable(msk); 4060 } else if (state == TCP_SYN_SENT && 4061 inet_test_bit(DEFER_CONNECT, sk)) { 4062 /* cf tcp_poll() note about TFO */ 4063 mask |= EPOLLOUT | EPOLLWRNORM; 4064 } 4065 4066 /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */ 4067 smp_rmb(); 4068 if (READ_ONCE(sk->sk_err)) 4069 mask |= EPOLLERR; 4070 4071 return mask; 4072 } 4073 4074 static const struct proto_ops mptcp_stream_ops = { 4075 .family = PF_INET, 4076 .owner = THIS_MODULE, 4077 .release = inet_release, 4078 .bind = mptcp_bind, 4079 .connect = inet_stream_connect, 4080 .socketpair = sock_no_socketpair, 4081 .accept = mptcp_stream_accept, 4082 .getname = inet_getname, 4083 .poll = mptcp_poll, 4084 .ioctl = inet_ioctl, 4085 .gettstamp = sock_gettstamp, 4086 .listen = mptcp_listen, 4087 .shutdown = inet_shutdown, 4088 .setsockopt = sock_common_setsockopt, 4089 .getsockopt = sock_common_getsockopt, 4090 .sendmsg = inet_sendmsg, 4091 .recvmsg = inet_recvmsg, 4092 .mmap = sock_no_mmap, 4093 .set_rcvlowat = mptcp_set_rcvlowat, 4094 }; 4095 4096 static struct inet_protosw mptcp_protosw = { 4097 .type = SOCK_STREAM, 4098 .protocol = IPPROTO_MPTCP, 4099 .prot = &mptcp_prot, 4100 .ops = &mptcp_stream_ops, 4101 .flags = INET_PROTOSW_ICSK, 4102 }; 4103 4104 static int mptcp_napi_poll(struct napi_struct *napi, int budget) 4105 { 4106 struct mptcp_delegated_action *delegated; 4107 struct mptcp_subflow_context *subflow; 4108 int work_done = 0; 4109 4110 delegated = container_of(napi, struct mptcp_delegated_action, napi); 4111 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { 4112 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 4113 4114 bh_lock_sock_nested(ssk); 4115 if (!sock_owned_by_user(ssk)) { 4116 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); 4117 } else { 4118 /* tcp_release_cb_override already processed 4119 * the action or will do at next release_sock(). 4120 * In both case must dequeue the subflow here - on the same 4121 * CPU that scheduled it. 4122 */ 4123 smp_wmb(); 4124 clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status); 4125 } 4126 bh_unlock_sock(ssk); 4127 sock_put(ssk); 4128 4129 if (++work_done == budget) 4130 return budget; 4131 } 4132 4133 /* always provide a 0 'work_done' argument, so that napi_complete_done 4134 * will not try accessing the NULL napi->dev ptr 4135 */ 4136 napi_complete_done(napi, 0); 4137 return work_done; 4138 } 4139 4140 void __init mptcp_proto_init(void) 4141 { 4142 struct mptcp_delegated_action *delegated; 4143 int cpu; 4144 4145 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; 4146 4147 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) 4148 panic("Failed to allocate MPTCP pcpu counter\n"); 4149 4150 init_dummy_netdev(&mptcp_napi_dev); 4151 for_each_possible_cpu(cpu) { 4152 delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu); 4153 INIT_LIST_HEAD(&delegated->head); 4154 netif_napi_add_tx(&mptcp_napi_dev, &delegated->napi, 4155 mptcp_napi_poll); 4156 napi_enable(&delegated->napi); 4157 } 4158 4159 mptcp_subflow_init(); 4160 mptcp_pm_init(); 4161 mptcp_sched_init(); 4162 mptcp_token_init(); 4163 4164 if (proto_register(&mptcp_prot, 1) != 0) 4165 panic("Failed to register MPTCP proto.\n"); 4166 4167 inet_register_protosw(&mptcp_protosw); 4168 4169 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); 4170 } 4171 4172 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 4173 static const struct proto_ops mptcp_v6_stream_ops = { 4174 .family = PF_INET6, 4175 .owner = THIS_MODULE, 4176 .release = inet6_release, 4177 .bind = mptcp_bind, 4178 .connect = inet_stream_connect, 4179 .socketpair = sock_no_socketpair, 4180 .accept = mptcp_stream_accept, 4181 .getname = inet6_getname, 4182 .poll = mptcp_poll, 4183 .ioctl = inet6_ioctl, 4184 .gettstamp = sock_gettstamp, 4185 .listen = mptcp_listen, 4186 .shutdown = inet_shutdown, 4187 .setsockopt = sock_common_setsockopt, 4188 .getsockopt = sock_common_getsockopt, 4189 .sendmsg = inet6_sendmsg, 4190 .recvmsg = inet6_recvmsg, 4191 .mmap = sock_no_mmap, 4192 #ifdef CONFIG_COMPAT 4193 .compat_ioctl = inet6_compat_ioctl, 4194 #endif 4195 .set_rcvlowat = mptcp_set_rcvlowat, 4196 }; 4197 4198 static struct proto mptcp_v6_prot; 4199 4200 static struct inet_protosw mptcp_v6_protosw = { 4201 .type = SOCK_STREAM, 4202 .protocol = IPPROTO_MPTCP, 4203 .prot = &mptcp_v6_prot, 4204 .ops = &mptcp_v6_stream_ops, 4205 .flags = INET_PROTOSW_ICSK, 4206 }; 4207 4208 int __init mptcp_proto_v6_init(void) 4209 { 4210 int err; 4211 4212 mptcp_v6_prot = mptcp_prot; 4213 strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name)); 4214 mptcp_v6_prot.slab = NULL; 4215 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); 4216 mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np); 4217 4218 err = proto_register(&mptcp_v6_prot, 1); 4219 if (err) 4220 return err; 4221 4222 err = inet6_register_protosw(&mptcp_v6_protosw); 4223 if (err) 4224 proto_unregister(&mptcp_v6_prot); 4225 4226 return err; 4227 } 4228 #endif 4229