1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2019, Intel Corporation. 5 */ 6 #define pr_fmt(fmt) "MPTCP: " fmt 7 8 #include <linux/rculist.h> 9 #include <linux/spinlock.h> 10 #include "protocol.h" 11 #include "mib.h" 12 13 #define ADD_ADDR_RETRANS_MAX 3 14 15 struct mptcp_pm_add_entry { 16 struct list_head list; 17 struct mptcp_addr_info addr; 18 u8 retrans_times; 19 struct timer_list add_timer; 20 struct mptcp_sock *sock; 21 }; 22 23 static DEFINE_SPINLOCK(mptcp_pm_list_lock); 24 static LIST_HEAD(mptcp_pm_list); 25 26 /* path manager helpers */ 27 28 /* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, 29 * otherwise allow any matching local/remote pair 30 */ 31 bool mptcp_pm_addr_families_match(const struct sock *sk, 32 const struct mptcp_addr_info *loc, 33 const struct mptcp_addr_info *rem) 34 { 35 bool mptcp_is_v4 = sk->sk_family == AF_INET; 36 37 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 38 bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); 39 bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); 40 41 if (mptcp_is_v4) 42 return loc_is_v4 && rem_is_v4; 43 44 if (ipv6_only_sock(sk)) 45 return !loc_is_v4 && !rem_is_v4; 46 47 return loc_is_v4 == rem_is_v4; 48 #else 49 return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; 50 #endif 51 } 52 53 bool mptcp_addresses_equal(const struct mptcp_addr_info *a, 54 const struct mptcp_addr_info *b, bool use_port) 55 { 56 bool addr_equals = false; 57 58 if (a->family == b->family) { 59 if (a->family == AF_INET) 60 addr_equals = a->addr.s_addr == b->addr.s_addr; 61 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 62 else 63 addr_equals = ipv6_addr_equal(&a->addr6, &b->addr6); 64 } else if (a->family == AF_INET) { 65 if (ipv6_addr_v4mapped(&b->addr6)) 66 addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3]; 67 } else if (b->family == AF_INET) { 68 if (ipv6_addr_v4mapped(&a->addr6)) 69 addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr; 70 #endif 71 } 72 73 if (!addr_equals) 74 return false; 75 if (!use_port) 76 return true; 77 78 return a->port == b->port; 79 } 80 81 void mptcp_local_address(const struct sock_common *skc, 82 struct mptcp_addr_info *addr) 83 { 84 addr->family = skc->skc_family; 85 addr->port = htons(skc->skc_num); 86 if (addr->family == AF_INET) 87 addr->addr.s_addr = skc->skc_rcv_saddr; 88 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 89 else if (addr->family == AF_INET6) 90 addr->addr6 = skc->skc_v6_rcv_saddr; 91 #endif 92 } 93 94 void mptcp_remote_address(const struct sock_common *skc, 95 struct mptcp_addr_info *addr) 96 { 97 addr->family = skc->skc_family; 98 addr->port = skc->skc_dport; 99 if (addr->family == AF_INET) 100 addr->addr.s_addr = skc->skc_daddr; 101 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 102 else if (addr->family == AF_INET6) 103 addr->addr6 = skc->skc_v6_daddr; 104 #endif 105 } 106 107 static bool mptcp_pm_is_init_remote_addr(struct mptcp_sock *msk, 108 const struct mptcp_addr_info *remote) 109 { 110 struct mptcp_addr_info mpc_remote; 111 112 mptcp_remote_address((struct sock_common *)msk, &mpc_remote); 113 return mptcp_addresses_equal(&mpc_remote, remote, remote->port); 114 } 115 116 bool mptcp_lookup_subflow_by_saddr(const struct list_head *list, 117 const struct mptcp_addr_info *saddr) 118 { 119 struct mptcp_subflow_context *subflow; 120 struct mptcp_addr_info cur; 121 struct sock_common *skc; 122 123 list_for_each_entry(subflow, list, node) { 124 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); 125 126 mptcp_local_address(skc, &cur); 127 if (mptcp_addresses_equal(&cur, saddr, saddr->port)) 128 return true; 129 } 130 131 return false; 132 } 133 134 static struct mptcp_pm_add_entry * 135 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, 136 const struct mptcp_addr_info *addr) 137 { 138 struct mptcp_pm_add_entry *entry; 139 140 lockdep_assert_held(&msk->pm.lock); 141 142 list_for_each_entry(entry, &msk->pm.anno_list, list) { 143 if (mptcp_addresses_equal(&entry->addr, addr, true)) 144 return entry; 145 } 146 147 return NULL; 148 } 149 150 bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk, 151 const struct mptcp_addr_info *addr) 152 { 153 struct mptcp_pm_add_entry *entry; 154 bool ret; 155 156 entry = mptcp_pm_del_add_timer(msk, addr, false); 157 ret = entry; 158 kfree(entry); 159 160 return ret; 161 } 162 163 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) 164 { 165 struct mptcp_pm_add_entry *entry; 166 struct mptcp_addr_info saddr; 167 bool ret = false; 168 169 mptcp_local_address((struct sock_common *)sk, &saddr); 170 171 spin_lock_bh(&msk->pm.lock); 172 list_for_each_entry(entry, &msk->pm.anno_list, list) { 173 if (mptcp_addresses_equal(&entry->addr, &saddr, true)) { 174 ret = true; 175 goto out; 176 } 177 } 178 179 out: 180 spin_unlock_bh(&msk->pm.lock); 181 return ret; 182 } 183 184 static void __mptcp_pm_send_ack(struct mptcp_sock *msk, 185 struct mptcp_subflow_context *subflow, 186 bool prio, bool backup) 187 { 188 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 189 bool slow; 190 191 pr_debug("send ack for %s\n", 192 prio ? "mp_prio" : 193 (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr")); 194 195 slow = lock_sock_fast(ssk); 196 if (prio) { 197 subflow->send_mp_prio = 1; 198 subflow->request_bkup = backup; 199 } 200 201 __mptcp_subflow_send_ack(ssk); 202 unlock_sock_fast(ssk, slow); 203 } 204 205 void mptcp_pm_send_ack(struct mptcp_sock *msk, 206 struct mptcp_subflow_context *subflow, 207 bool prio, bool backup) 208 { 209 spin_unlock_bh(&msk->pm.lock); 210 __mptcp_pm_send_ack(msk, subflow, prio, backup); 211 spin_lock_bh(&msk->pm.lock); 212 } 213 214 void mptcp_pm_addr_send_ack(struct mptcp_sock *msk) 215 { 216 struct mptcp_subflow_context *subflow, *alt = NULL; 217 218 msk_owned_by_me(msk); 219 lockdep_assert_held(&msk->pm.lock); 220 221 if (!mptcp_pm_should_add_signal(msk) && 222 !mptcp_pm_should_rm_signal(msk)) 223 return; 224 225 mptcp_for_each_subflow(msk, subflow) { 226 if (__mptcp_subflow_active(subflow)) { 227 if (!subflow->stale) { 228 mptcp_pm_send_ack(msk, subflow, false, false); 229 return; 230 } 231 232 if (!alt) 233 alt = subflow; 234 } 235 } 236 237 if (alt) 238 mptcp_pm_send_ack(msk, alt, false, false); 239 } 240 241 int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk, 242 struct mptcp_addr_info *addr, 243 struct mptcp_addr_info *rem, 244 u8 bkup) 245 { 246 struct mptcp_subflow_context *subflow; 247 248 pr_debug("bkup=%d\n", bkup); 249 250 mptcp_for_each_subflow(msk, subflow) { 251 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 252 struct mptcp_addr_info local, remote; 253 254 mptcp_local_address((struct sock_common *)ssk, &local); 255 if (!mptcp_addresses_equal(&local, addr, addr->port)) 256 continue; 257 258 if (rem && rem->family != AF_UNSPEC) { 259 mptcp_remote_address((struct sock_common *)ssk, &remote); 260 if (!mptcp_addresses_equal(&remote, rem, rem->port)) 261 continue; 262 } 263 264 __mptcp_pm_send_ack(msk, subflow, true, bkup); 265 return 0; 266 } 267 268 return -EINVAL; 269 } 270 271 static unsigned int mptcp_adjust_add_addr_timeout(struct mptcp_sock *msk) 272 { 273 const struct net *net = sock_net((struct sock *)msk); 274 unsigned int rto = mptcp_get_add_addr_timeout(net); 275 struct mptcp_subflow_context *subflow; 276 unsigned int max = 0; 277 278 mptcp_for_each_subflow(msk, subflow) { 279 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 280 struct inet_connection_sock *icsk = inet_csk(ssk); 281 282 if (icsk->icsk_rto > max) 283 max = icsk->icsk_rto; 284 } 285 286 if (max && max < rto) 287 rto = max; 288 289 return rto; 290 } 291 292 static void mptcp_pm_add_timer(struct timer_list *timer) 293 { 294 struct mptcp_pm_add_entry *entry = timer_container_of(entry, timer, 295 add_timer); 296 struct mptcp_sock *msk = entry->sock; 297 struct sock *sk = (struct sock *)msk; 298 unsigned int timeout; 299 300 pr_debug("msk=%p\n", msk); 301 302 if (!msk) 303 return; 304 305 if (inet_sk_state_load(sk) == TCP_CLOSE) 306 return; 307 308 if (!entry->addr.id) 309 return; 310 311 if (mptcp_pm_should_add_signal_addr(msk)) { 312 sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8); 313 goto out; 314 } 315 316 timeout = mptcp_adjust_add_addr_timeout(msk); 317 if (!timeout) 318 goto out; 319 320 spin_lock_bh(&msk->pm.lock); 321 322 if (!mptcp_pm_should_add_signal_addr(msk)) { 323 pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); 324 mptcp_pm_announce_addr(msk, &entry->addr, false); 325 mptcp_pm_add_addr_send_ack(msk); 326 entry->retrans_times++; 327 } 328 329 if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) 330 sk_reset_timer(sk, timer, 331 jiffies + (timeout << entry->retrans_times)); 332 333 spin_unlock_bh(&msk->pm.lock); 334 335 if (entry->retrans_times == ADD_ADDR_RETRANS_MAX) 336 mptcp_pm_subflow_established(msk); 337 338 out: 339 __sock_put(sk); 340 } 341 342 struct mptcp_pm_add_entry * 343 mptcp_pm_del_add_timer(struct mptcp_sock *msk, 344 const struct mptcp_addr_info *addr, bool check_id) 345 { 346 struct mptcp_pm_add_entry *entry; 347 struct sock *sk = (struct sock *)msk; 348 struct timer_list *add_timer = NULL; 349 350 spin_lock_bh(&msk->pm.lock); 351 entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 352 if (entry && (!check_id || entry->addr.id == addr->id)) { 353 entry->retrans_times = ADD_ADDR_RETRANS_MAX; 354 add_timer = &entry->add_timer; 355 } 356 if (!check_id && entry) 357 list_del(&entry->list); 358 spin_unlock_bh(&msk->pm.lock); 359 360 /* no lock, because sk_stop_timer_sync() is calling timer_delete_sync() */ 361 if (add_timer) 362 sk_stop_timer_sync(sk, add_timer); 363 364 return entry; 365 } 366 367 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, 368 const struct mptcp_addr_info *addr) 369 { 370 struct mptcp_pm_add_entry *add_entry = NULL; 371 struct sock *sk = (struct sock *)msk; 372 unsigned int timeout; 373 374 lockdep_assert_held(&msk->pm.lock); 375 376 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 377 378 if (add_entry) { 379 if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) 380 return false; 381 382 goto reset_timer; 383 } 384 385 add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); 386 if (!add_entry) 387 return false; 388 389 list_add(&add_entry->list, &msk->pm.anno_list); 390 391 add_entry->addr = *addr; 392 add_entry->sock = msk; 393 add_entry->retrans_times = 0; 394 395 timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); 396 reset_timer: 397 timeout = mptcp_adjust_add_addr_timeout(msk); 398 if (timeout) 399 sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout); 400 401 return true; 402 } 403 404 static void mptcp_pm_free_anno_list(struct mptcp_sock *msk) 405 { 406 struct mptcp_pm_add_entry *entry, *tmp; 407 struct sock *sk = (struct sock *)msk; 408 LIST_HEAD(free_list); 409 410 pr_debug("msk=%p\n", msk); 411 412 spin_lock_bh(&msk->pm.lock); 413 list_splice_init(&msk->pm.anno_list, &free_list); 414 spin_unlock_bh(&msk->pm.lock); 415 416 list_for_each_entry_safe(entry, tmp, &free_list, list) { 417 sk_stop_timer_sync(sk, &entry->add_timer); 418 kfree(entry); 419 } 420 } 421 422 /* path manager command handlers */ 423 424 int mptcp_pm_announce_addr(struct mptcp_sock *msk, 425 const struct mptcp_addr_info *addr, 426 bool echo) 427 { 428 u8 add_addr = READ_ONCE(msk->pm.addr_signal); 429 430 pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo); 431 432 lockdep_assert_held(&msk->pm.lock); 433 434 if (add_addr & 435 (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) { 436 MPTCP_INC_STATS(sock_net((struct sock *)msk), 437 echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP); 438 return -EINVAL; 439 } 440 441 if (echo) { 442 msk->pm.remote = *addr; 443 add_addr |= BIT(MPTCP_ADD_ADDR_ECHO); 444 } else { 445 msk->pm.local = *addr; 446 add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL); 447 } 448 WRITE_ONCE(msk->pm.addr_signal, add_addr); 449 return 0; 450 } 451 452 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) 453 { 454 u8 rm_addr = READ_ONCE(msk->pm.addr_signal); 455 456 pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr); 457 458 if (rm_addr) { 459 MPTCP_ADD_STATS(sock_net((struct sock *)msk), 460 MPTCP_MIB_RMADDRTXDROP, rm_list->nr); 461 return -EINVAL; 462 } 463 464 msk->pm.rm_list_tx = *rm_list; 465 rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL); 466 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 467 mptcp_pm_addr_send_ack(msk); 468 return 0; 469 } 470 471 /* path manager event handlers */ 472 473 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side) 474 { 475 struct mptcp_pm_data *pm = &msk->pm; 476 477 pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side); 478 479 WRITE_ONCE(pm->server_side, server_side); 480 mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); 481 } 482 483 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk) 484 { 485 struct mptcp_pm_data *pm = &msk->pm; 486 unsigned int limit_extra_subflows; 487 int ret = 0; 488 489 if (mptcp_pm_is_userspace(msk)) { 490 if (mptcp_userspace_pm_active(msk)) { 491 spin_lock_bh(&pm->lock); 492 pm->extra_subflows++; 493 spin_unlock_bh(&pm->lock); 494 return true; 495 } 496 return false; 497 } 498 499 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); 500 501 pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, 502 pm->extra_subflows, limit_extra_subflows, 503 READ_ONCE(pm->accept_subflow)); 504 505 /* try to avoid acquiring the lock below */ 506 if (!READ_ONCE(pm->accept_subflow)) 507 return false; 508 509 spin_lock_bh(&pm->lock); 510 if (READ_ONCE(pm->accept_subflow)) { 511 ret = pm->extra_subflows < limit_extra_subflows; 512 if (ret && ++pm->extra_subflows == limit_extra_subflows) 513 WRITE_ONCE(pm->accept_subflow, false); 514 } 515 spin_unlock_bh(&pm->lock); 516 517 return ret; 518 } 519 520 /* return true if the new status bit is currently cleared, that is, this event 521 * can be server, eventually by an already scheduled work 522 */ 523 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, 524 enum mptcp_pm_status new_status) 525 { 526 pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status, 527 BIT(new_status)); 528 if (msk->pm.status & BIT(new_status)) 529 return false; 530 531 msk->pm.status |= BIT(new_status); 532 mptcp_schedule_work((struct sock *)msk); 533 return true; 534 } 535 536 void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk) 537 { 538 struct mptcp_pm_data *pm = &msk->pm; 539 bool announce = false; 540 541 pr_debug("msk=%p\n", msk); 542 543 spin_lock_bh(&pm->lock); 544 545 /* mptcp_pm_fully_established() can be invoked by multiple 546 * racing paths - accept() and check_fully_established() 547 * be sure to serve this event only once. 548 */ 549 if (READ_ONCE(pm->work_pending) && 550 !(pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) 551 mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED); 552 553 if ((pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0) 554 announce = true; 555 556 pm->status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); 557 spin_unlock_bh(&pm->lock); 558 559 if (announce) 560 mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC); 561 } 562 563 void mptcp_pm_connection_closed(struct mptcp_sock *msk) 564 { 565 pr_debug("msk=%p\n", msk); 566 567 if (msk->token) 568 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL); 569 } 570 571 void mptcp_pm_subflow_established(struct mptcp_sock *msk) 572 { 573 struct mptcp_pm_data *pm = &msk->pm; 574 575 pr_debug("msk=%p\n", msk); 576 577 if (!READ_ONCE(pm->work_pending)) 578 return; 579 580 spin_lock_bh(&pm->lock); 581 582 if (READ_ONCE(pm->work_pending)) 583 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 584 585 spin_unlock_bh(&pm->lock); 586 } 587 588 void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, 589 const struct mptcp_subflow_context *subflow) 590 { 591 struct mptcp_pm_data *pm = &msk->pm; 592 bool update_subflows; 593 594 update_subflows = subflow->request_join || subflow->mp_join; 595 if (mptcp_pm_is_userspace(msk)) { 596 if (update_subflows) { 597 spin_lock_bh(&pm->lock); 598 pm->extra_subflows--; 599 spin_unlock_bh(&pm->lock); 600 } 601 return; 602 } 603 604 if (!READ_ONCE(pm->work_pending) && !update_subflows) 605 return; 606 607 spin_lock_bh(&pm->lock); 608 if (update_subflows) 609 __mptcp_pm_close_subflow(msk); 610 611 /* Even if this subflow is not really established, tell the PM to try 612 * to pick the next ones, if possible. 613 */ 614 if (mptcp_pm_nl_check_work_pending(msk)) 615 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 616 617 spin_unlock_bh(&pm->lock); 618 } 619 620 void mptcp_pm_add_addr_received(const struct sock *ssk, 621 const struct mptcp_addr_info *addr) 622 { 623 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 624 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 625 struct mptcp_pm_data *pm = &msk->pm; 626 627 pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id, 628 READ_ONCE(pm->accept_addr)); 629 630 mptcp_event_addr_announced(ssk, addr); 631 632 spin_lock_bh(&pm->lock); 633 634 if (mptcp_pm_is_userspace(msk)) { 635 if (mptcp_userspace_pm_active(msk)) { 636 mptcp_pm_announce_addr(msk, addr, true); 637 mptcp_pm_add_addr_send_ack(msk); 638 } else { 639 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); 640 } 641 /* - id0 should not have a different address 642 * - special case for C-flag: linked to fill_local_addresses_vec() 643 */ 644 } else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) || 645 (addr->id > 0 && !READ_ONCE(pm->accept_addr) && 646 !mptcp_pm_add_addr_c_flag_case(msk))) { 647 mptcp_pm_announce_addr(msk, addr, true); 648 mptcp_pm_add_addr_send_ack(msk); 649 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { 650 pm->remote = *addr; 651 } else { 652 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); 653 } 654 655 spin_unlock_bh(&pm->lock); 656 } 657 658 void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, 659 const struct mptcp_addr_info *addr) 660 { 661 struct mptcp_pm_data *pm = &msk->pm; 662 663 pr_debug("msk=%p\n", msk); 664 665 if (!READ_ONCE(pm->work_pending)) 666 return; 667 668 spin_lock_bh(&pm->lock); 669 670 if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending)) 671 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 672 673 spin_unlock_bh(&pm->lock); 674 } 675 676 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) 677 { 678 if (!mptcp_pm_should_add_signal(msk)) 679 return; 680 681 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); 682 } 683 684 static void mptcp_pm_rm_addr_or_subflow(struct mptcp_sock *msk, 685 const struct mptcp_rm_list *rm_list, 686 enum linux_mptcp_mib_field rm_type) 687 { 688 struct mptcp_subflow_context *subflow, *tmp; 689 struct sock *sk = (struct sock *)msk; 690 u8 i; 691 692 pr_debug("%s rm_list_nr %d\n", 693 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr); 694 695 msk_owned_by_me(msk); 696 697 if (sk->sk_state == TCP_LISTEN) 698 return; 699 700 if (!rm_list->nr) 701 return; 702 703 if (list_empty(&msk->conn_list)) 704 return; 705 706 for (i = 0; i < rm_list->nr; i++) { 707 u8 rm_id = rm_list->ids[i]; 708 bool removed = false; 709 710 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 711 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 712 u8 remote_id = READ_ONCE(subflow->remote_id); 713 int how = RCV_SHUTDOWN | SEND_SHUTDOWN; 714 u8 id = subflow_get_local_id(subflow); 715 716 if ((1 << inet_sk_state_load(ssk)) & 717 (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE)) 718 continue; 719 if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) 720 continue; 721 if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) 722 continue; 723 724 pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", 725 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", 726 i, rm_id, id, remote_id, msk->mpc_endpoint_id); 727 spin_unlock_bh(&msk->pm.lock); 728 mptcp_subflow_shutdown(sk, ssk, how); 729 removed |= subflow->request_join; 730 731 /* the following takes care of updating the subflows counter */ 732 mptcp_close_ssk(sk, ssk, subflow); 733 spin_lock_bh(&msk->pm.lock); 734 735 if (rm_type == MPTCP_MIB_RMSUBFLOW) 736 __MPTCP_INC_STATS(sock_net(sk), rm_type); 737 } 738 739 if (rm_type == MPTCP_MIB_RMADDR) { 740 __MPTCP_INC_STATS(sock_net(sk), rm_type); 741 if (removed && mptcp_pm_is_kernel(msk)) 742 mptcp_pm_nl_rm_addr(msk, rm_id); 743 } 744 } 745 } 746 747 static void mptcp_pm_rm_addr_recv(struct mptcp_sock *msk) 748 { 749 mptcp_pm_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); 750 } 751 752 void mptcp_pm_rm_subflow(struct mptcp_sock *msk, 753 const struct mptcp_rm_list *rm_list) 754 { 755 mptcp_pm_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); 756 } 757 758 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, 759 const struct mptcp_rm_list *rm_list) 760 { 761 struct mptcp_pm_data *pm = &msk->pm; 762 u8 i; 763 764 pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr); 765 766 for (i = 0; i < rm_list->nr; i++) 767 mptcp_event_addr_removed(msk, rm_list->ids[i]); 768 769 spin_lock_bh(&pm->lock); 770 if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED)) 771 pm->rm_list_rx = *rm_list; 772 else 773 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP); 774 spin_unlock_bh(&pm->lock); 775 } 776 777 void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup) 778 { 779 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 780 struct sock *sk = subflow->conn; 781 struct mptcp_sock *msk; 782 783 pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup); 784 msk = mptcp_sk(sk); 785 if (subflow->backup != bkup) 786 subflow->backup = bkup; 787 788 mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC); 789 } 790 791 void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) 792 { 793 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 794 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 795 796 pr_debug("fail_seq=%llu\n", fail_seq); 797 798 /* After accepting the fail, we can't create any other subflows */ 799 spin_lock_bh(&msk->fallback_lock); 800 if (!msk->allow_infinite_fallback) { 801 spin_unlock_bh(&msk->fallback_lock); 802 return; 803 } 804 msk->allow_subflows = false; 805 spin_unlock_bh(&msk->fallback_lock); 806 807 if (!subflow->fail_tout) { 808 pr_debug("send MP_FAIL response and infinite map\n"); 809 810 subflow->send_mp_fail = 1; 811 subflow->send_infinite_map = 1; 812 tcp_send_ack(sk); 813 } else { 814 pr_debug("MP_FAIL response received\n"); 815 WRITE_ONCE(subflow->fail_tout, 0); 816 } 817 } 818 819 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb, 820 unsigned int opt_size, unsigned int remaining, 821 struct mptcp_addr_info *addr, bool *echo, 822 bool *drop_other_suboptions) 823 { 824 int ret = false; 825 u8 add_addr; 826 u8 family; 827 bool port; 828 829 spin_lock_bh(&msk->pm.lock); 830 831 /* double check after the lock is acquired */ 832 if (!mptcp_pm_should_add_signal(msk)) 833 goto out_unlock; 834 835 /* always drop every other options for pure ack ADD_ADDR; this is a 836 * plain dup-ack from TCP perspective. The other MPTCP-relevant info, 837 * if any, will be carried by the 'original' TCP ack 838 */ 839 if (skb && skb_is_tcp_pure_ack(skb)) { 840 remaining += opt_size; 841 *drop_other_suboptions = true; 842 } 843 844 *echo = mptcp_pm_should_add_signal_echo(msk); 845 port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port); 846 847 family = *echo ? msk->pm.remote.family : msk->pm.local.family; 848 if (remaining < mptcp_add_addr_len(family, *echo, port)) 849 goto out_unlock; 850 851 if (*echo) { 852 *addr = msk->pm.remote; 853 add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO); 854 } else { 855 *addr = msk->pm.local; 856 add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL); 857 } 858 WRITE_ONCE(msk->pm.addr_signal, add_addr); 859 ret = true; 860 861 out_unlock: 862 spin_unlock_bh(&msk->pm.lock); 863 return ret; 864 } 865 866 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, 867 struct mptcp_rm_list *rm_list) 868 { 869 int ret = false, len; 870 u8 rm_addr; 871 872 spin_lock_bh(&msk->pm.lock); 873 874 /* double check after the lock is acquired */ 875 if (!mptcp_pm_should_rm_signal(msk)) 876 goto out_unlock; 877 878 rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL); 879 len = mptcp_rm_addr_len(&msk->pm.rm_list_tx); 880 if (len < 0) { 881 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 882 goto out_unlock; 883 } 884 if (remaining < len) 885 goto out_unlock; 886 887 *rm_list = msk->pm.rm_list_tx; 888 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 889 ret = true; 890 891 out_unlock: 892 spin_unlock_bh(&msk->pm.lock); 893 return ret; 894 } 895 896 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) 897 { 898 struct mptcp_pm_addr_entry skc_local = { 0 }; 899 struct mptcp_addr_info msk_local; 900 901 if (WARN_ON_ONCE(!msk)) 902 return -1; 903 904 /* The 0 ID mapping is defined by the first subflow, copied into the msk 905 * addr 906 */ 907 mptcp_local_address((struct sock_common *)msk, &msk_local); 908 mptcp_local_address((struct sock_common *)skc, &skc_local.addr); 909 if (mptcp_addresses_equal(&msk_local, &skc_local.addr, false)) 910 return 0; 911 912 skc_local.addr.id = 0; 913 skc_local.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; 914 915 if (mptcp_pm_is_userspace(msk)) 916 return mptcp_userspace_pm_get_local_id(msk, &skc_local); 917 return mptcp_pm_nl_get_local_id(msk, &skc_local); 918 } 919 920 bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc) 921 { 922 struct mptcp_addr_info skc_local; 923 924 mptcp_local_address((struct sock_common *)skc, &skc_local); 925 926 if (mptcp_pm_is_userspace(msk)) 927 return mptcp_userspace_pm_is_backup(msk, &skc_local); 928 929 return mptcp_pm_nl_is_backup(msk, &skc_local); 930 } 931 932 static void mptcp_pm_subflows_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) 933 { 934 struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk); 935 struct sock *sk = (struct sock *)msk; 936 unsigned int active_max_loss_cnt; 937 struct net *net = sock_net(sk); 938 unsigned int stale_loss_cnt; 939 bool slow; 940 941 stale_loss_cnt = mptcp_stale_loss_cnt(net); 942 if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt) 943 return; 944 945 /* look for another available subflow not in loss state */ 946 active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1); 947 mptcp_for_each_subflow(msk, iter) { 948 if (iter != subflow && mptcp_subflow_active(iter) && 949 iter->stale_count < active_max_loss_cnt) { 950 /* we have some alternatives, try to mark this subflow as idle ...*/ 951 slow = lock_sock_fast(ssk); 952 if (!tcp_rtx_and_write_queues_empty(ssk)) { 953 subflow->stale = 1; 954 __mptcp_retransmit_pending_data(sk); 955 MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE); 956 } 957 unlock_sock_fast(ssk, slow); 958 959 /* always try to push the pending data regardless of re-injections: 960 * we can possibly use backup subflows now, and subflow selection 961 * is cheap under the msk socket lock 962 */ 963 __mptcp_push_pending(sk, 0); 964 return; 965 } 966 } 967 } 968 969 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) 970 { 971 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 972 u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp); 973 974 /* keep track of rtx periods with no progress */ 975 if (!subflow->stale_count) { 976 subflow->stale_rcv_tstamp = rcv_tstamp; 977 subflow->stale_count++; 978 } else if (subflow->stale_rcv_tstamp == rcv_tstamp) { 979 if (subflow->stale_count < U8_MAX) 980 subflow->stale_count++; 981 mptcp_pm_subflows_chk_stale(msk, ssk); 982 } else { 983 subflow->stale_count = 0; 984 mptcp_subflow_set_active(subflow); 985 } 986 } 987 988 void mptcp_pm_worker(struct mptcp_sock *msk) 989 { 990 struct mptcp_pm_data *pm = &msk->pm; 991 992 msk_owned_by_me(msk); 993 994 if (!(pm->status & MPTCP_PM_WORK_MASK)) 995 return; 996 997 spin_lock_bh(&msk->pm.lock); 998 999 pr_debug("msk=%p status=%x\n", msk, pm->status); 1000 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { 1001 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); 1002 mptcp_pm_addr_send_ack(msk); 1003 } 1004 if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { 1005 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); 1006 mptcp_pm_rm_addr_recv(msk); 1007 } 1008 __mptcp_pm_kernel_worker(msk); 1009 1010 spin_unlock_bh(&msk->pm.lock); 1011 } 1012 1013 void mptcp_pm_destroy(struct mptcp_sock *msk) 1014 { 1015 mptcp_pm_free_anno_list(msk); 1016 1017 if (mptcp_pm_is_userspace(msk)) 1018 mptcp_userspace_pm_free_local_addr_list(msk); 1019 } 1020 1021 void mptcp_pm_data_reset(struct mptcp_sock *msk) 1022 { 1023 u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk)); 1024 struct mptcp_pm_data *pm = &msk->pm; 1025 1026 memset(&pm->reset, 0, sizeof(pm->reset)); 1027 pm->rm_list_tx.nr = 0; 1028 pm->rm_list_rx.nr = 0; 1029 WRITE_ONCE(pm->pm_type, pm_type); 1030 1031 if (pm_type == MPTCP_PM_TYPE_KERNEL) { 1032 bool subflows_allowed = !!mptcp_pm_get_limit_extra_subflows(msk); 1033 1034 /* pm->work_pending must be only be set to 'true' when 1035 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL 1036 */ 1037 WRITE_ONCE(pm->work_pending, 1038 (!!mptcp_pm_get_endp_subflow_max(msk) && 1039 subflows_allowed) || 1040 !!mptcp_pm_get_endp_signal_max(msk)); 1041 WRITE_ONCE(pm->accept_addr, 1042 !!mptcp_pm_get_limit_add_addr_accepted(msk) && 1043 subflows_allowed); 1044 WRITE_ONCE(pm->accept_subflow, subflows_allowed); 1045 1046 bitmap_fill(pm->id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1047 } 1048 } 1049 1050 void mptcp_pm_data_init(struct mptcp_sock *msk) 1051 { 1052 spin_lock_init(&msk->pm.lock); 1053 INIT_LIST_HEAD(&msk->pm.anno_list); 1054 INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list); 1055 mptcp_pm_data_reset(msk); 1056 } 1057 1058 void __init mptcp_pm_init(void) 1059 { 1060 mptcp_pm_kernel_register(); 1061 mptcp_pm_userspace_register(); 1062 mptcp_pm_nl_init(); 1063 } 1064 1065 /* Must be called with rcu read lock held */ 1066 struct mptcp_pm_ops *mptcp_pm_find(const char *name) 1067 { 1068 struct mptcp_pm_ops *pm_ops; 1069 1070 list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) { 1071 if (!strcmp(pm_ops->name, name)) 1072 return pm_ops; 1073 } 1074 1075 return NULL; 1076 } 1077 1078 int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops) 1079 { 1080 return 0; 1081 } 1082 1083 int mptcp_pm_register(struct mptcp_pm_ops *pm_ops) 1084 { 1085 int ret; 1086 1087 ret = mptcp_pm_validate(pm_ops); 1088 if (ret) 1089 return ret; 1090 1091 spin_lock(&mptcp_pm_list_lock); 1092 if (mptcp_pm_find(pm_ops->name)) { 1093 spin_unlock(&mptcp_pm_list_lock); 1094 return -EEXIST; 1095 } 1096 list_add_tail_rcu(&pm_ops->list, &mptcp_pm_list); 1097 spin_unlock(&mptcp_pm_list_lock); 1098 1099 pr_debug("%s registered\n", pm_ops->name); 1100 return 0; 1101 } 1102 1103 void mptcp_pm_unregister(struct mptcp_pm_ops *pm_ops) 1104 { 1105 /* skip unregistering the default path manager */ 1106 if (WARN_ON_ONCE(pm_ops == &mptcp_pm_kernel)) 1107 return; 1108 1109 spin_lock(&mptcp_pm_list_lock); 1110 list_del_rcu(&pm_ops->list); 1111 spin_unlock(&mptcp_pm_list_lock); 1112 } 1113 1114 /* Build string with list of available path manager values. 1115 * Similar to tcp_get_available_congestion_control() 1116 */ 1117 void mptcp_pm_get_available(char *buf, size_t maxlen) 1118 { 1119 struct mptcp_pm_ops *pm_ops; 1120 size_t offs = 0; 1121 1122 rcu_read_lock(); 1123 list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) { 1124 offs += snprintf(buf + offs, maxlen - offs, "%s%s", 1125 offs == 0 ? "" : " ", pm_ops->name); 1126 1127 if (WARN_ON_ONCE(offs >= maxlen)) 1128 break; 1129 } 1130 rcu_read_unlock(); 1131 } 1132