1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2020, Red Hat, Inc. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/inet.h> 10 #include <linux/kernel.h> 11 #include <net/inet_common.h> 12 #include <net/netns/generic.h> 13 #include <net/mptcp.h> 14 15 #include "protocol.h" 16 #include "mib.h" 17 #include "mptcp_pm_gen.h" 18 19 static int pm_nl_pernet_id; 20 21 struct mptcp_pm_add_entry { 22 struct list_head list; 23 struct mptcp_addr_info addr; 24 u8 retrans_times; 25 struct timer_list add_timer; 26 struct mptcp_sock *sock; 27 }; 28 29 struct pm_nl_pernet { 30 /* protects pernet updates */ 31 spinlock_t lock; 32 struct list_head local_addr_list; 33 unsigned int addrs; 34 unsigned int stale_loss_cnt; 35 unsigned int add_addr_signal_max; 36 unsigned int add_addr_accept_max; 37 unsigned int local_addr_max; 38 unsigned int subflows_max; 39 unsigned int next_id; 40 DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 41 }; 42 43 #define MPTCP_PM_ADDR_MAX 8 44 #define ADD_ADDR_RETRANS_MAX 3 45 46 static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net) 47 { 48 return net_generic(net, pm_nl_pernet_id); 49 } 50 51 static struct pm_nl_pernet * 52 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk) 53 { 54 return pm_nl_get_pernet(sock_net((struct sock *)msk)); 55 } 56 57 bool mptcp_addresses_equal(const struct mptcp_addr_info *a, 58 const struct mptcp_addr_info *b, bool use_port) 59 { 60 bool addr_equals = false; 61 62 if (a->family == b->family) { 63 if (a->family == AF_INET) 64 addr_equals = a->addr.s_addr == b->addr.s_addr; 65 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 66 else 67 addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6); 68 } else if (a->family == AF_INET) { 69 if (ipv6_addr_v4mapped(&b->addr6)) 70 addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3]; 71 } else if (b->family == AF_INET) { 72 if (ipv6_addr_v4mapped(&a->addr6)) 73 addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr; 74 #endif 75 } 76 77 if (!addr_equals) 78 return false; 79 if (!use_port) 80 return true; 81 82 return a->port == b->port; 83 } 84 85 void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr) 86 { 87 addr->family = skc->skc_family; 88 addr->port = htons(skc->skc_num); 89 if (addr->family == AF_INET) 90 addr->addr.s_addr = skc->skc_rcv_saddr; 91 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 92 else if (addr->family == AF_INET6) 93 addr->addr6 = skc->skc_v6_rcv_saddr; 94 #endif 95 } 96 97 static void remote_address(const struct sock_common *skc, 98 struct mptcp_addr_info *addr) 99 { 100 addr->family = skc->skc_family; 101 addr->port = skc->skc_dport; 102 if (addr->family == AF_INET) 103 addr->addr.s_addr = skc->skc_daddr; 104 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 105 else if (addr->family == AF_INET6) 106 addr->addr6 = skc->skc_v6_daddr; 107 #endif 108 } 109 110 static bool lookup_subflow_by_saddr(const struct list_head *list, 111 const struct mptcp_addr_info *saddr) 112 { 113 struct mptcp_subflow_context *subflow; 114 struct mptcp_addr_info cur; 115 struct sock_common *skc; 116 117 list_for_each_entry(subflow, list, node) { 118 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); 119 120 mptcp_local_address(skc, &cur); 121 if (mptcp_addresses_equal(&cur, saddr, saddr->port)) 122 return true; 123 } 124 125 return false; 126 } 127 128 static bool lookup_subflow_by_daddr(const struct list_head *list, 129 const struct mptcp_addr_info *daddr) 130 { 131 struct mptcp_subflow_context *subflow; 132 struct mptcp_addr_info cur; 133 134 
list_for_each_entry(subflow, list, node) { 135 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 136 137 if (!((1 << inet_sk_state_load(ssk)) & 138 (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) 139 continue; 140 141 remote_address((struct sock_common *)ssk, &cur); 142 if (mptcp_addresses_equal(&cur, daddr, daddr->port)) 143 return true; 144 } 145 146 return false; 147 } 148 149 static bool 150 select_local_address(const struct pm_nl_pernet *pernet, 151 const struct mptcp_sock *msk, 152 struct mptcp_pm_local *new_local) 153 { 154 struct mptcp_pm_addr_entry *entry; 155 bool found = false; 156 157 msk_owned_by_me(msk); 158 159 rcu_read_lock(); 160 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 161 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) 162 continue; 163 164 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) 165 continue; 166 167 new_local->addr = entry->addr; 168 new_local->flags = entry->flags; 169 new_local->ifindex = entry->ifindex; 170 found = true; 171 break; 172 } 173 rcu_read_unlock(); 174 175 return found; 176 } 177 178 static bool 179 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, 180 struct mptcp_pm_local *new_local) 181 { 182 struct mptcp_pm_addr_entry *entry; 183 bool found = false; 184 185 rcu_read_lock(); 186 /* do not keep any additional per socket state, just signal 187 * the address list in order. 188 * Note: removal from the local address list during the msk life-cycle 189 * can lead to additional addresses not being announced. 190 */ 191 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 192 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) 193 continue; 194 195 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) 196 continue; 197 198 new_local->addr = entry->addr; 199 new_local->flags = entry->flags; 200 new_local->ifindex = entry->ifindex; 201 found = true; 202 break; 203 } 204 rcu_read_unlock(); 205 206 return found; 207 } 208 209 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk) 210 { 211 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 212 213 return READ_ONCE(pernet->add_addr_signal_max); 214 } 215 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max); 216 217 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk) 218 { 219 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 220 221 return READ_ONCE(pernet->add_addr_accept_max); 222 } 223 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max); 224 225 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk) 226 { 227 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 228 229 return READ_ONCE(pernet->subflows_max); 230 } 231 EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max); 232 233 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk) 234 { 235 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 236 237 return READ_ONCE(pernet->local_addr_max); 238 } 239 EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max); 240 241 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk) 242 { 243 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 244 245 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) || 246 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap, 247 MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) { 248 WRITE_ONCE(msk->pm.work_pending, false); 249 return false; 250 } 251 return true; 252 } 253 254 struct mptcp_pm_add_entry * 255 mptcp_lookup_anno_list_by_saddr(const struct 
mptcp_sock *msk, 256 const struct mptcp_addr_info *addr) 257 { 258 struct mptcp_pm_add_entry *entry; 259 260 lockdep_assert_held(&msk->pm.lock); 261 262 list_for_each_entry(entry, &msk->pm.anno_list, list) { 263 if (mptcp_addresses_equal(&entry->addr, addr, true)) 264 return entry; 265 } 266 267 return NULL; 268 } 269 270 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) 271 { 272 struct mptcp_pm_add_entry *entry; 273 struct mptcp_addr_info saddr; 274 bool ret = false; 275 276 mptcp_local_address((struct sock_common *)sk, &saddr); 277 278 spin_lock_bh(&msk->pm.lock); 279 list_for_each_entry(entry, &msk->pm.anno_list, list) { 280 if (mptcp_addresses_equal(&entry->addr, &saddr, true)) { 281 ret = true; 282 goto out; 283 } 284 } 285 286 out: 287 spin_unlock_bh(&msk->pm.lock); 288 return ret; 289 } 290 291 static void mptcp_pm_add_timer(struct timer_list *timer) 292 { 293 struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); 294 struct mptcp_sock *msk = entry->sock; 295 struct sock *sk = (struct sock *)msk; 296 297 pr_debug("msk=%p\n", msk); 298 299 if (!msk) 300 return; 301 302 if (inet_sk_state_load(sk) == TCP_CLOSE) 303 return; 304 305 if (!entry->addr.id) 306 return; 307 308 if (mptcp_pm_should_add_signal_addr(msk)) { 309 sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8); 310 goto out; 311 } 312 313 spin_lock_bh(&msk->pm.lock); 314 315 if (!mptcp_pm_should_add_signal_addr(msk)) { 316 pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); 317 mptcp_pm_announce_addr(msk, &entry->addr, false); 318 mptcp_pm_add_addr_send_ack(msk); 319 entry->retrans_times++; 320 } 321 322 if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) 323 sk_reset_timer(sk, timer, 324 jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); 325 326 spin_unlock_bh(&msk->pm.lock); 327 328 if (entry->retrans_times == ADD_ADDR_RETRANS_MAX) 329 mptcp_pm_subflow_established(msk); 330 331 out: 332 __sock_put(sk); 333 } 334 335 struct mptcp_pm_add_entry * 336 mptcp_pm_del_add_timer(struct mptcp_sock *msk, 337 const struct mptcp_addr_info *addr, bool check_id) 338 { 339 struct mptcp_pm_add_entry *entry; 340 struct sock *sk = (struct sock *)msk; 341 struct timer_list *add_timer = NULL; 342 343 spin_lock_bh(&msk->pm.lock); 344 entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 345 if (entry && (!check_id || entry->addr.id == addr->id)) { 346 entry->retrans_times = ADD_ADDR_RETRANS_MAX; 347 add_timer = &entry->add_timer; 348 } 349 if (!check_id && entry) 350 list_del(&entry->list); 351 spin_unlock_bh(&msk->pm.lock); 352 353 /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */ 354 if (add_timer) 355 sk_stop_timer_sync(sk, add_timer); 356 357 return entry; 358 } 359 360 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, 361 const struct mptcp_addr_info *addr) 362 { 363 struct mptcp_pm_add_entry *add_entry = NULL; 364 struct sock *sk = (struct sock *)msk; 365 struct net *net = sock_net(sk); 366 367 lockdep_assert_held(&msk->pm.lock); 368 369 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 370 371 if (add_entry) { 372 if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) 373 return false; 374 375 sk_reset_timer(sk, &add_entry->add_timer, 376 jiffies + mptcp_get_add_addr_timeout(net)); 377 return true; 378 } 379 380 add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); 381 if (!add_entry) 382 return false; 383 384 list_add(&add_entry->list, &msk->pm.anno_list); 385 386 add_entry->addr = *addr; 387 add_entry->sock = msk; 388 add_entry->retrans_times = 0; 389 390 
timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); 391 sk_reset_timer(sk, &add_entry->add_timer, 392 jiffies + mptcp_get_add_addr_timeout(net)); 393 394 return true; 395 } 396 397 void mptcp_pm_free_anno_list(struct mptcp_sock *msk) 398 { 399 struct mptcp_pm_add_entry *entry, *tmp; 400 struct sock *sk = (struct sock *)msk; 401 LIST_HEAD(free_list); 402 403 pr_debug("msk=%p\n", msk); 404 405 spin_lock_bh(&msk->pm.lock); 406 list_splice_init(&msk->pm.anno_list, &free_list); 407 spin_unlock_bh(&msk->pm.lock); 408 409 list_for_each_entry_safe(entry, tmp, &free_list, list) { 410 sk_stop_timer_sync(sk, &entry->add_timer); 411 kfree(entry); 412 } 413 } 414 415 /* Fill all the remote addresses into the array addrs[], 416 * and return the array size. 417 */ 418 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, 419 struct mptcp_addr_info *local, 420 bool fullmesh, 421 struct mptcp_addr_info *addrs) 422 { 423 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); 424 struct sock *sk = (struct sock *)msk, *ssk; 425 struct mptcp_subflow_context *subflow; 426 struct mptcp_addr_info remote = { 0 }; 427 unsigned int subflows_max; 428 int i = 0; 429 430 subflows_max = mptcp_pm_get_subflows_max(msk); 431 remote_address((struct sock_common *)sk, &remote); 432 433 /* Non-fullmesh endpoint, fill in the single entry 434 * corresponding to the primary MPC subflow remote address 435 */ 436 if (!fullmesh) { 437 if (deny_id0) 438 return 0; 439 440 if (!mptcp_pm_addr_families_match(sk, local, &remote)) 441 return 0; 442 443 msk->pm.subflows++; 444 addrs[i++] = remote; 445 } else { 446 DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 447 448 /* Forbid creation of new subflows matching existing 449 * ones, possibly already created by incoming ADD_ADDR 450 */ 451 bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 452 mptcp_for_each_subflow(msk, subflow) 453 if (READ_ONCE(subflow->local_id) == local->id) 454 __set_bit(subflow->remote_id, unavail_id); 455 456 mptcp_for_each_subflow(msk, subflow) { 457 ssk = mptcp_subflow_tcp_sock(subflow); 458 remote_address((struct sock_common *)ssk, &addrs[i]); 459 addrs[i].id = READ_ONCE(subflow->remote_id); 460 if (deny_id0 && !addrs[i].id) 461 continue; 462 463 if (test_bit(addrs[i].id, unavail_id)) 464 continue; 465 466 if (!mptcp_pm_addr_families_match(sk, local, &addrs[i])) 467 continue; 468 469 if (msk->pm.subflows < subflows_max) { 470 /* forbid creating multiple address towards 471 * this id 472 */ 473 __set_bit(addrs[i].id, unavail_id); 474 msk->pm.subflows++; 475 i++; 476 } 477 } 478 } 479 480 return i; 481 } 482 483 static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 484 bool prio, bool backup) 485 { 486 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 487 bool slow; 488 489 pr_debug("send ack for %s\n", 490 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? 
"add_addr" : "rm_addr")); 491 492 slow = lock_sock_fast(ssk); 493 if (prio) { 494 subflow->send_mp_prio = 1; 495 subflow->request_bkup = backup; 496 } 497 498 __mptcp_subflow_send_ack(ssk); 499 unlock_sock_fast(ssk, slow); 500 } 501 502 static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 503 bool prio, bool backup) 504 { 505 spin_unlock_bh(&msk->pm.lock); 506 __mptcp_pm_send_ack(msk, subflow, prio, backup); 507 spin_lock_bh(&msk->pm.lock); 508 } 509 510 static struct mptcp_pm_addr_entry * 511 __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) 512 { 513 struct mptcp_pm_addr_entry *entry; 514 515 list_for_each_entry(entry, &pernet->local_addr_list, list) { 516 if (entry->addr.id == id) 517 return entry; 518 } 519 return NULL; 520 } 521 522 static struct mptcp_pm_addr_entry * 523 __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) 524 { 525 struct mptcp_pm_addr_entry *entry; 526 527 list_for_each_entry(entry, &pernet->local_addr_list, list) { 528 if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) 529 return entry; 530 } 531 return NULL; 532 } 533 534 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) 535 { 536 struct sock *sk = (struct sock *)msk; 537 unsigned int add_addr_signal_max; 538 bool signal_and_subflow = false; 539 unsigned int local_addr_max; 540 struct pm_nl_pernet *pernet; 541 struct mptcp_pm_local local; 542 unsigned int subflows_max; 543 544 pernet = pm_nl_get_pernet(sock_net(sk)); 545 546 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk); 547 local_addr_max = mptcp_pm_get_local_addr_max(msk); 548 subflows_max = mptcp_pm_get_subflows_max(msk); 549 550 /* do lazy endpoint usage accounting for the MPC subflows */ 551 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) { 552 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first); 553 struct mptcp_pm_addr_entry *entry; 554 struct mptcp_addr_info mpc_addr; 555 bool backup = false; 556 557 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); 558 rcu_read_lock(); 559 entry = __lookup_addr(pernet, &mpc_addr); 560 if (entry) { 561 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); 562 msk->mpc_endpoint_id = entry->addr.id; 563 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 564 } 565 rcu_read_unlock(); 566 567 if (backup) 568 mptcp_pm_send_ack(msk, subflow, true, backup); 569 570 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); 571 } 572 573 pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", 574 msk->pm.local_addr_used, local_addr_max, 575 msk->pm.add_addr_signaled, add_addr_signal_max, 576 msk->pm.subflows, subflows_max); 577 578 /* check first for announce */ 579 if (msk->pm.add_addr_signaled < add_addr_signal_max) { 580 /* due to racing events on both ends we can reach here while 581 * previous add address is still running: if we invoke now 582 * mptcp_pm_announce_addr(), that will fail and the 583 * corresponding id will be marked as used. 584 * Instead let the PM machinery reschedule us when the 585 * current address announce will be completed. 586 */ 587 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) 588 return; 589 590 if (!select_signal_address(pernet, msk, &local)) 591 goto subflow; 592 593 /* If the alloc fails, we are on memory pressure, not worth 594 * continuing, and trying to create subflows. 
595 */ 596 if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) 597 return; 598 599 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 600 msk->pm.add_addr_signaled++; 601 602 /* Special case for ID0: set the correct ID */ 603 if (local.addr.id == msk->mpc_endpoint_id) 604 local.addr.id = 0; 605 606 mptcp_pm_announce_addr(msk, &local.addr, false); 607 mptcp_pm_nl_addr_send_ack(msk); 608 609 if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) 610 signal_and_subflow = true; 611 } 612 613 subflow: 614 /* check if should create a new subflow */ 615 while (msk->pm.local_addr_used < local_addr_max && 616 msk->pm.subflows < subflows_max) { 617 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; 618 bool fullmesh; 619 int i, nr; 620 621 if (signal_and_subflow) 622 signal_and_subflow = false; 623 else if (!select_local_address(pernet, msk, &local)) 624 break; 625 626 fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); 627 628 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 629 630 /* Special case for ID0: set the correct ID */ 631 if (local.addr.id == msk->mpc_endpoint_id) 632 local.addr.id = 0; 633 else /* local_addr_used is not decr for ID 0 */ 634 msk->pm.local_addr_used++; 635 636 nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); 637 if (nr == 0) 638 continue; 639 640 spin_unlock_bh(&msk->pm.lock); 641 for (i = 0; i < nr; i++) 642 __mptcp_subflow_connect(sk, &local, &addrs[i]); 643 spin_lock_bh(&msk->pm.lock); 644 } 645 mptcp_pm_nl_check_work_pending(msk); 646 } 647 648 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk) 649 { 650 mptcp_pm_create_subflow_or_signal_addr(msk); 651 } 652 653 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) 654 { 655 mptcp_pm_create_subflow_or_signal_addr(msk); 656 } 657 658 /* Fill all the local addresses into the array addrs[], 659 * and return the array size. 660 */ 661 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, 662 struct mptcp_addr_info *remote, 663 struct mptcp_pm_local *locals) 664 { 665 struct sock *sk = (struct sock *)msk; 666 struct mptcp_pm_addr_entry *entry; 667 struct mptcp_addr_info mpc_addr; 668 struct pm_nl_pernet *pernet; 669 unsigned int subflows_max; 670 int i = 0; 671 672 pernet = pm_nl_get_pernet_from_msk(msk); 673 subflows_max = mptcp_pm_get_subflows_max(msk); 674 675 mptcp_local_address((struct sock_common *)msk, &mpc_addr); 676 677 rcu_read_lock(); 678 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 679 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) 680 continue; 681 682 if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) 683 continue; 684 685 if (msk->pm.subflows < subflows_max) { 686 locals[i].addr = entry->addr; 687 locals[i].flags = entry->flags; 688 locals[i].ifindex = entry->ifindex; 689 690 /* Special case for ID0: set the correct ID */ 691 if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port)) 692 locals[i].addr.id = 0; 693 694 msk->pm.subflows++; 695 i++; 696 } 697 } 698 rcu_read_unlock(); 699 700 /* If the array is empty, fill in the single 701 * 'IPADDRANY' local address 702 */ 703 if (!i) { 704 memset(&locals[i], 0, sizeof(locals[i])); 705 locals[i].addr.family = 706 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 707 remote->family == AF_INET6 && 708 ipv6_addr_v4mapped(&remote->addr6) ? 
AF_INET : 709 #endif 710 remote->family; 711 712 if (!mptcp_pm_addr_families_match(sk, &locals[i].addr, remote)) 713 return 0; 714 715 msk->pm.subflows++; 716 i++; 717 } 718 719 return i; 720 } 721 722 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) 723 { 724 struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX]; 725 struct sock *sk = (struct sock *)msk; 726 unsigned int add_addr_accept_max; 727 struct mptcp_addr_info remote; 728 unsigned int subflows_max; 729 bool sf_created = false; 730 int i, nr; 731 732 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); 733 subflows_max = mptcp_pm_get_subflows_max(msk); 734 735 pr_debug("accepted %d:%d remote family %d\n", 736 msk->pm.add_addr_accepted, add_addr_accept_max, 737 msk->pm.remote.family); 738 739 remote = msk->pm.remote; 740 mptcp_pm_announce_addr(msk, &remote, true); 741 mptcp_pm_nl_addr_send_ack(msk); 742 743 if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) 744 return; 745 746 /* pick id 0 port, if none is provided the remote address */ 747 if (!remote.port) 748 remote.port = sk->sk_dport; 749 750 /* connect to the specified remote address, using whatever 751 * local address the routing configuration will pick. 752 */ 753 nr = fill_local_addresses_vec(msk, &remote, locals); 754 if (nr == 0) 755 return; 756 757 spin_unlock_bh(&msk->pm.lock); 758 for (i = 0; i < nr; i++) 759 if (__mptcp_subflow_connect(sk, &locals[i], &remote) == 0) 760 sf_created = true; 761 spin_lock_bh(&msk->pm.lock); 762 763 if (sf_created) { 764 /* add_addr_accepted is not decr for ID 0 */ 765 if (remote.id) 766 msk->pm.add_addr_accepted++; 767 if (msk->pm.add_addr_accepted >= add_addr_accept_max || 768 msk->pm.subflows >= subflows_max) 769 WRITE_ONCE(msk->pm.accept_addr, false); 770 } 771 } 772 773 bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, 774 const struct mptcp_addr_info *remote) 775 { 776 struct mptcp_addr_info mpc_remote; 777 778 remote_address((struct sock_common *)msk, &mpc_remote); 779 return mptcp_addresses_equal(&mpc_remote, remote, remote->port); 780 } 781 782 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) 783 { 784 struct mptcp_subflow_context *subflow; 785 786 msk_owned_by_me(msk); 787 lockdep_assert_held(&msk->pm.lock); 788 789 if (!mptcp_pm_should_add_signal(msk) && 790 !mptcp_pm_should_rm_signal(msk)) 791 return; 792 793 mptcp_for_each_subflow(msk, subflow) { 794 if (__mptcp_subflow_active(subflow)) { 795 mptcp_pm_send_ack(msk, subflow, false, false); 796 break; 797 } 798 } 799 } 800 801 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, 802 struct mptcp_addr_info *addr, 803 struct mptcp_addr_info *rem, 804 u8 bkup) 805 { 806 struct mptcp_subflow_context *subflow; 807 808 pr_debug("bkup=%d\n", bkup); 809 810 mptcp_for_each_subflow(msk, subflow) { 811 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 812 struct mptcp_addr_info local, remote; 813 814 mptcp_local_address((struct sock_common *)ssk, &local); 815 if (!mptcp_addresses_equal(&local, addr, addr->port)) 816 continue; 817 818 if (rem && rem->family != AF_UNSPEC) { 819 remote_address((struct sock_common *)ssk, &remote); 820 if (!mptcp_addresses_equal(&remote, rem, rem->port)) 821 continue; 822 } 823 824 __mptcp_pm_send_ack(msk, subflow, true, bkup); 825 return 0; 826 } 827 828 return -EINVAL; 829 } 830 831 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, 832 const struct mptcp_rm_list *rm_list, 833 enum linux_mptcp_mib_field rm_type) 834 { 835 struct mptcp_subflow_context *subflow, *tmp; 836 struct sock *sk = (struct 
sock *)msk; 837 u8 i; 838 839 pr_debug("%s rm_list_nr %d\n", 840 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr); 841 842 msk_owned_by_me(msk); 843 844 if (sk->sk_state == TCP_LISTEN) 845 return; 846 847 if (!rm_list->nr) 848 return; 849 850 if (list_empty(&msk->conn_list)) 851 return; 852 853 for (i = 0; i < rm_list->nr; i++) { 854 u8 rm_id = rm_list->ids[i]; 855 bool removed = false; 856 857 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 858 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 859 u8 remote_id = READ_ONCE(subflow->remote_id); 860 int how = RCV_SHUTDOWN | SEND_SHUTDOWN; 861 u8 id = subflow_get_local_id(subflow); 862 863 if ((1 << inet_sk_state_load(ssk)) & 864 (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE)) 865 continue; 866 if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) 867 continue; 868 if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) 869 continue; 870 871 pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", 872 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", 873 i, rm_id, id, remote_id, msk->mpc_endpoint_id); 874 spin_unlock_bh(&msk->pm.lock); 875 mptcp_subflow_shutdown(sk, ssk, how); 876 removed |= subflow->request_join; 877 878 /* the following takes care of updating the subflows counter */ 879 mptcp_close_ssk(sk, ssk, subflow); 880 spin_lock_bh(&msk->pm.lock); 881 882 if (rm_type == MPTCP_MIB_RMSUBFLOW) 883 __MPTCP_INC_STATS(sock_net(sk), rm_type); 884 } 885 886 if (rm_type == MPTCP_MIB_RMADDR) 887 __MPTCP_INC_STATS(sock_net(sk), rm_type); 888 889 if (!removed) 890 continue; 891 892 if (!mptcp_pm_is_kernel(msk)) 893 continue; 894 895 if (rm_type == MPTCP_MIB_RMADDR && rm_id && 896 !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { 897 /* Note: if the subflow has been closed before, this 898 * add_addr_accepted counter will not be decremented. 
899 */ 900 if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) 901 WRITE_ONCE(msk->pm.accept_addr, true); 902 } 903 } 904 } 905 906 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) 907 { 908 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); 909 } 910 911 static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, 912 const struct mptcp_rm_list *rm_list) 913 { 914 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); 915 } 916 917 void mptcp_pm_nl_work(struct mptcp_sock *msk) 918 { 919 struct mptcp_pm_data *pm = &msk->pm; 920 921 msk_owned_by_me(msk); 922 923 if (!(pm->status & MPTCP_PM_WORK_MASK)) 924 return; 925 926 spin_lock_bh(&msk->pm.lock); 927 928 pr_debug("msk=%p status=%x\n", msk, pm->status); 929 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { 930 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); 931 mptcp_pm_nl_add_addr_received(msk); 932 } 933 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { 934 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); 935 mptcp_pm_nl_addr_send_ack(msk); 936 } 937 if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { 938 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); 939 mptcp_pm_nl_rm_addr_received(msk); 940 } 941 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { 942 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); 943 mptcp_pm_nl_fully_established(msk); 944 } 945 if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { 946 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); 947 mptcp_pm_nl_subflow_established(msk); 948 } 949 950 spin_unlock_bh(&msk->pm.lock); 951 } 952 953 static bool address_use_port(struct mptcp_pm_addr_entry *entry) 954 { 955 return (entry->flags & 956 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == 957 MPTCP_PM_ADDR_FLAG_SIGNAL; 958 } 959 960 /* caller must ensure the RCU grace period is already elapsed */ 961 static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) 962 { 963 if (entry->lsk) 964 sock_release(entry->lsk); 965 kfree(entry); 966 } 967 968 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, 969 struct mptcp_pm_addr_entry *entry, 970 bool needs_id) 971 { 972 struct mptcp_pm_addr_entry *cur, *del_entry = NULL; 973 unsigned int addr_max; 974 int ret = -EINVAL; 975 976 spin_lock_bh(&pernet->lock); 977 /* to keep the code simple, don't do IDR-like allocation for address ID, 978 * just bail when we exceed limits 979 */ 980 if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID) 981 pernet->next_id = 1; 982 if (pernet->addrs >= MPTCP_PM_ADDR_MAX) { 983 ret = -ERANGE; 984 goto out; 985 } 986 if (test_bit(entry->addr.id, pernet->id_bitmap)) { 987 ret = -EBUSY; 988 goto out; 989 } 990 991 /* do not insert duplicate address, differentiate on port only 992 * singled addresses 993 */ 994 if (!address_use_port(entry)) 995 entry->addr.port = 0; 996 list_for_each_entry(cur, &pernet->local_addr_list, list) { 997 if (mptcp_addresses_equal(&cur->addr, &entry->addr, 998 cur->addr.port || entry->addr.port)) { 999 /* allow replacing the exiting endpoint only if such 1000 * endpoint is an implicit one and the user-space 1001 * did not provide an endpoint id 1002 */ 1003 if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) { 1004 ret = -EEXIST; 1005 goto out; 1006 } 1007 if (entry->addr.id) 1008 goto out; 1009 1010 pernet->addrs--; 1011 entry->addr.id = cur->addr.id; 1012 list_del_rcu(&cur->list); 1013 del_entry = cur; 1014 break; 1015 } 1016 } 1017 1018 if (!entry->addr.id && needs_id) { 1019 find_next: 1020 entry->addr.id = 
find_next_zero_bit(pernet->id_bitmap, 1021 MPTCP_PM_MAX_ADDR_ID + 1, 1022 pernet->next_id); 1023 if (!entry->addr.id && pernet->next_id != 1) { 1024 pernet->next_id = 1; 1025 goto find_next; 1026 } 1027 } 1028 1029 if (!entry->addr.id && needs_id) 1030 goto out; 1031 1032 __set_bit(entry->addr.id, pernet->id_bitmap); 1033 if (entry->addr.id > pernet->next_id) 1034 pernet->next_id = entry->addr.id; 1035 1036 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { 1037 addr_max = pernet->add_addr_signal_max; 1038 WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1); 1039 } 1040 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1041 addr_max = pernet->local_addr_max; 1042 WRITE_ONCE(pernet->local_addr_max, addr_max + 1); 1043 } 1044 1045 pernet->addrs++; 1046 if (!entry->addr.port) 1047 list_add_tail_rcu(&entry->list, &pernet->local_addr_list); 1048 else 1049 list_add_rcu(&entry->list, &pernet->local_addr_list); 1050 ret = entry->addr.id; 1051 1052 out: 1053 spin_unlock_bh(&pernet->lock); 1054 1055 /* just replaced an existing entry, free it */ 1056 if (del_entry) { 1057 synchronize_rcu(); 1058 __mptcp_pm_release_addr_entry(del_entry); 1059 } 1060 return ret; 1061 } 1062 1063 static struct lock_class_key mptcp_slock_keys[2]; 1064 static struct lock_class_key mptcp_keys[2]; 1065 1066 static int mptcp_pm_nl_create_listen_socket(struct sock *sk, 1067 struct mptcp_pm_addr_entry *entry) 1068 { 1069 bool is_ipv6 = sk->sk_family == AF_INET6; 1070 int addrlen = sizeof(struct sockaddr_in); 1071 struct sockaddr_storage addr; 1072 struct sock *newsk, *ssk; 1073 int backlog = 1024; 1074 int err; 1075 1076 err = sock_create_kern(sock_net(sk), entry->addr.family, 1077 SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk); 1078 if (err) 1079 return err; 1080 1081 newsk = entry->lsk->sk; 1082 if (!newsk) 1083 return -EINVAL; 1084 1085 /* The subflow socket lock is acquired in a nested to the msk one 1086 * in several places, even by the TCP stack, and this msk is a kernel 1087 * socket: lockdep complains. Instead of propagating the _nested 1088 * modifiers in several places, re-init the lock class for the msk 1089 * socket to an mptcp specific one. 1090 */ 1091 sock_lock_init_class_and_name(newsk, 1092 is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", 1093 &mptcp_slock_keys[is_ipv6], 1094 is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET", 1095 &mptcp_keys[is_ipv6]); 1096 1097 lock_sock(newsk); 1098 ssk = __mptcp_nmpc_sk(mptcp_sk(newsk)); 1099 release_sock(newsk); 1100 if (IS_ERR(ssk)) 1101 return PTR_ERR(ssk); 1102 1103 mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family); 1104 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1105 if (entry->addr.family == AF_INET6) 1106 addrlen = sizeof(struct sockaddr_in6); 1107 #endif 1108 if (ssk->sk_family == AF_INET) 1109 err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); 1110 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1111 else if (ssk->sk_family == AF_INET6) 1112 err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); 1113 #endif 1114 if (err) 1115 return err; 1116 1117 /* We don't use mptcp_set_state() here because it needs to be called 1118 * under the msk socket lock. For the moment, that will not bring 1119 * anything more than only calling inet_sk_state_store(), because the 1120 * old status is known (TCP_CLOSE). 
1121 */ 1122 inet_sk_state_store(newsk, TCP_LISTEN); 1123 lock_sock(ssk); 1124 WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true); 1125 err = __inet_listen_sk(ssk, backlog); 1126 if (!err) 1127 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); 1128 release_sock(ssk); 1129 return err; 1130 } 1131 1132 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc) 1133 { 1134 struct mptcp_pm_addr_entry *entry; 1135 struct pm_nl_pernet *pernet; 1136 int ret = -1; 1137 1138 pernet = pm_nl_get_pernet_from_msk(msk); 1139 1140 rcu_read_lock(); 1141 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 1142 if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { 1143 ret = entry->addr.id; 1144 break; 1145 } 1146 } 1147 rcu_read_unlock(); 1148 if (ret >= 0) 1149 return ret; 1150 1151 /* address not found, add to local list */ 1152 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 1153 if (!entry) 1154 return -ENOMEM; 1155 1156 entry->addr = *skc; 1157 entry->addr.id = 0; 1158 entry->addr.port = 0; 1159 entry->ifindex = 0; 1160 entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; 1161 entry->lsk = NULL; 1162 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); 1163 if (ret < 0) 1164 kfree(entry); 1165 1166 return ret; 1167 } 1168 1169 bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) 1170 { 1171 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 1172 struct mptcp_pm_addr_entry *entry; 1173 bool backup = false; 1174 1175 rcu_read_lock(); 1176 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 1177 if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { 1178 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1179 break; 1180 } 1181 } 1182 rcu_read_unlock(); 1183 1184 return backup; 1185 } 1186 1187 #define MPTCP_PM_CMD_GRP_OFFSET 0 1188 #define MPTCP_PM_EV_GRP_OFFSET 1 1189 1190 static const struct genl_multicast_group mptcp_pm_mcgrps[] = { 1191 [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, }, 1192 [MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME, 1193 .flags = GENL_MCAST_CAP_NET_ADMIN, 1194 }, 1195 }; 1196 1197 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) 1198 { 1199 struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk); 1200 struct sock *sk = (struct sock *)msk; 1201 unsigned int active_max_loss_cnt; 1202 struct net *net = sock_net(sk); 1203 unsigned int stale_loss_cnt; 1204 bool slow; 1205 1206 stale_loss_cnt = mptcp_stale_loss_cnt(net); 1207 if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt) 1208 return; 1209 1210 /* look for another available subflow not in loss state */ 1211 active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1); 1212 mptcp_for_each_subflow(msk, iter) { 1213 if (iter != subflow && mptcp_subflow_active(iter) && 1214 iter->stale_count < active_max_loss_cnt) { 1215 /* we have some alternatives, try to mark this subflow as idle ...*/ 1216 slow = lock_sock_fast(ssk); 1217 if (!tcp_rtx_and_write_queues_empty(ssk)) { 1218 subflow->stale = 1; 1219 __mptcp_retransmit_pending_data(sk); 1220 MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE); 1221 } 1222 unlock_sock_fast(ssk, slow); 1223 1224 /* always try to push the pending data regardless of re-injections: 1225 * we can possibly use backup subflows now, and subflow selection 1226 * is cheap under the msk socket lock 1227 */ 1228 __mptcp_push_pending(sk, 0); 1229 return; 1230 } 1231 } 1232 } 1233 1234 static int 
mptcp_pm_family_to_addr(int family) 1235 { 1236 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1237 if (family == AF_INET6) 1238 return MPTCP_PM_ADDR_ATTR_ADDR6; 1239 #endif 1240 return MPTCP_PM_ADDR_ATTR_ADDR4; 1241 } 1242 1243 static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[], 1244 const struct nlattr *attr, 1245 struct genl_info *info, 1246 struct mptcp_addr_info *addr, 1247 bool require_family) 1248 { 1249 int err, addr_addr; 1250 1251 if (!attr) { 1252 GENL_SET_ERR_MSG(info, "missing address info"); 1253 return -EINVAL; 1254 } 1255 1256 /* no validation needed - was already done via nested policy */ 1257 err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, 1258 mptcp_pm_address_nl_policy, info->extack); 1259 if (err) 1260 return err; 1261 1262 if (tb[MPTCP_PM_ADDR_ATTR_ID]) 1263 addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]); 1264 1265 if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) { 1266 if (!require_family) 1267 return 0; 1268 1269 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1270 "missing family"); 1271 return -EINVAL; 1272 } 1273 1274 addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]); 1275 if (addr->family != AF_INET 1276 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1277 && addr->family != AF_INET6 1278 #endif 1279 ) { 1280 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1281 "unknown address family"); 1282 return -EINVAL; 1283 } 1284 addr_addr = mptcp_pm_family_to_addr(addr->family); 1285 if (!tb[addr_addr]) { 1286 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1287 "missing address data"); 1288 return -EINVAL; 1289 } 1290 1291 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1292 if (addr->family == AF_INET6) 1293 addr->addr6 = nla_get_in6_addr(tb[addr_addr]); 1294 else 1295 #endif 1296 addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]); 1297 1298 if (tb[MPTCP_PM_ADDR_ATTR_PORT]) 1299 addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); 1300 1301 return 0; 1302 } 1303 1304 int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, 1305 struct mptcp_addr_info *addr) 1306 { 1307 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 1308 1309 memset(addr, 0, sizeof(*addr)); 1310 1311 return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true); 1312 } 1313 1314 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, 1315 bool require_family, 1316 struct mptcp_pm_addr_entry *entry) 1317 { 1318 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 1319 int err; 1320 1321 memset(entry, 0, sizeof(*entry)); 1322 1323 err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family); 1324 if (err) 1325 return err; 1326 1327 if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) { 1328 u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]); 1329 1330 entry->ifindex = val; 1331 } 1332 1333 if (tb[MPTCP_PM_ADDR_ATTR_FLAGS]) 1334 entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]); 1335 1336 if (tb[MPTCP_PM_ADDR_ATTR_PORT]) 1337 entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); 1338 1339 return 0; 1340 } 1341 1342 static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) 1343 { 1344 return pm_nl_get_pernet(genl_info_net(info)); 1345 } 1346 1347 static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, 1348 struct mptcp_addr_info *addr) 1349 { 1350 struct mptcp_sock *msk; 1351 long s_slot = 0, s_num = 0; 1352 1353 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1354 struct sock *sk = (struct sock *)msk; 1355 struct mptcp_addr_info mpc_addr; 1356 1357 if (!READ_ONCE(msk->fully_established) || 1358 mptcp_pm_is_userspace(msk)) 1359 goto next; 1360 1361 /* if 
the endp linked to the init sf is re-added with a != ID */ 1362 mptcp_local_address((struct sock_common *)msk, &mpc_addr); 1363 1364 lock_sock(sk); 1365 spin_lock_bh(&msk->pm.lock); 1366 if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) 1367 msk->mpc_endpoint_id = addr->id; 1368 mptcp_pm_create_subflow_or_signal_addr(msk); 1369 spin_unlock_bh(&msk->pm.lock); 1370 release_sock(sk); 1371 1372 next: 1373 sock_put(sk); 1374 cond_resched(); 1375 } 1376 1377 return 0; 1378 } 1379 1380 static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, 1381 struct genl_info *info) 1382 { 1383 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 1384 1385 if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, 1386 mptcp_pm_address_nl_policy, info->extack) && 1387 tb[MPTCP_PM_ADDR_ATTR_ID]) 1388 return true; 1389 return false; 1390 } 1391 1392 int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) 1393 { 1394 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1395 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1396 struct mptcp_pm_addr_entry addr, *entry; 1397 int ret; 1398 1399 ret = mptcp_pm_parse_entry(attr, info, true, &addr); 1400 if (ret < 0) 1401 return ret; 1402 1403 if (addr.addr.port && !address_use_port(&addr)) { 1404 GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port"); 1405 return -EINVAL; 1406 } 1407 1408 if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL && 1409 addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { 1410 GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh"); 1411 return -EINVAL; 1412 } 1413 1414 if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { 1415 GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint"); 1416 return -EINVAL; 1417 } 1418 1419 entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT); 1420 if (!entry) { 1421 GENL_SET_ERR_MSG(info, "can't allocate addr"); 1422 return -ENOMEM; 1423 } 1424 1425 *entry = addr; 1426 if (entry->addr.port) { 1427 ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry); 1428 if (ret) { 1429 GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret); 1430 goto out_free; 1431 } 1432 } 1433 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, 1434 !mptcp_pm_has_addr_attr_id(attr, info)); 1435 if (ret < 0) { 1436 GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); 1437 goto out_free; 1438 } 1439 1440 mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); 1441 return 0; 1442 1443 out_free: 1444 __mptcp_pm_release_addr_entry(entry); 1445 return ret; 1446 } 1447 1448 static bool remove_anno_list_by_saddr(struct mptcp_sock *msk, 1449 const struct mptcp_addr_info *addr) 1450 { 1451 struct mptcp_pm_add_entry *entry; 1452 1453 entry = mptcp_pm_del_add_timer(msk, addr, false); 1454 if (entry) { 1455 kfree(entry); 1456 return true; 1457 } 1458 1459 return false; 1460 } 1461 1462 static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, 1463 const struct mptcp_addr_info *addr) 1464 { 1465 return msk->mpc_endpoint_id == addr->id ? 
0 : addr->id; 1466 } 1467 1468 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, 1469 const struct mptcp_addr_info *addr, 1470 bool force) 1471 { 1472 struct mptcp_rm_list list = { .nr = 0 }; 1473 bool ret; 1474 1475 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1476 1477 ret = remove_anno_list_by_saddr(msk, addr); 1478 if (ret || force) { 1479 spin_lock_bh(&msk->pm.lock); 1480 if (ret) { 1481 __set_bit(addr->id, msk->pm.id_avail_bitmap); 1482 msk->pm.add_addr_signaled--; 1483 } 1484 mptcp_pm_remove_addr(msk, &list); 1485 spin_unlock_bh(&msk->pm.lock); 1486 } 1487 return ret; 1488 } 1489 1490 static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) 1491 { 1492 /* If it was marked as used, and not ID 0, decrement local_addr_used */ 1493 if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && 1494 id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) 1495 msk->pm.local_addr_used--; 1496 } 1497 1498 static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, 1499 const struct mptcp_pm_addr_entry *entry) 1500 { 1501 const struct mptcp_addr_info *addr = &entry->addr; 1502 struct mptcp_rm_list list = { .nr = 1 }; 1503 long s_slot = 0, s_num = 0; 1504 struct mptcp_sock *msk; 1505 1506 pr_debug("remove_id=%d\n", addr->id); 1507 1508 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1509 struct sock *sk = (struct sock *)msk; 1510 bool remove_subflow; 1511 1512 if (mptcp_pm_is_userspace(msk)) 1513 goto next; 1514 1515 if (list_empty(&msk->conn_list)) { 1516 mptcp_pm_remove_anno_addr(msk, addr, false); 1517 goto next; 1518 } 1519 1520 lock_sock(sk); 1521 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); 1522 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && 1523 !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); 1524 1525 list.ids[0] = mptcp_endp_get_local_id(msk, addr); 1526 if (remove_subflow) { 1527 spin_lock_bh(&msk->pm.lock); 1528 mptcp_pm_nl_rm_subflow_received(msk, &list); 1529 spin_unlock_bh(&msk->pm.lock); 1530 } 1531 1532 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1533 spin_lock_bh(&msk->pm.lock); 1534 __mark_subflow_endp_available(msk, list.ids[0]); 1535 spin_unlock_bh(&msk->pm.lock); 1536 } 1537 1538 if (msk->mpc_endpoint_id == entry->addr.id) 1539 msk->mpc_endpoint_id = 0; 1540 release_sock(sk); 1541 1542 next: 1543 sock_put(sk); 1544 cond_resched(); 1545 } 1546 1547 return 0; 1548 } 1549 1550 static int mptcp_nl_remove_id_zero_address(struct net *net, 1551 struct mptcp_addr_info *addr) 1552 { 1553 struct mptcp_rm_list list = { .nr = 0 }; 1554 long s_slot = 0, s_num = 0; 1555 struct mptcp_sock *msk; 1556 1557 list.ids[list.nr++] = 0; 1558 1559 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1560 struct sock *sk = (struct sock *)msk; 1561 struct mptcp_addr_info msk_local; 1562 1563 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) 1564 goto next; 1565 1566 mptcp_local_address((struct sock_common *)msk, &msk_local); 1567 if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) 1568 goto next; 1569 1570 lock_sock(sk); 1571 spin_lock_bh(&msk->pm.lock); 1572 mptcp_pm_remove_addr(msk, &list); 1573 mptcp_pm_nl_rm_subflow_received(msk, &list); 1574 __mark_subflow_endp_available(msk, 0); 1575 spin_unlock_bh(&msk->pm.lock); 1576 release_sock(sk); 1577 1578 next: 1579 sock_put(sk); 1580 cond_resched(); 1581 } 1582 1583 return 0; 1584 } 1585 1586 int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) 1587 { 1588 struct nlattr *attr = 
info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1589 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1590 struct mptcp_pm_addr_entry addr, *entry; 1591 unsigned int addr_max; 1592 int ret; 1593 1594 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 1595 if (ret < 0) 1596 return ret; 1597 1598 /* the zero id address is special: the first address used by the msk 1599 * always gets such an id, so different subflows can have different zero 1600 * id addresses. Additionally zero id is not accounted for in id_bitmap. 1601 * Let's use an 'mptcp_rm_list' instead of the common remove code. 1602 */ 1603 if (addr.addr.id == 0) 1604 return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr); 1605 1606 spin_lock_bh(&pernet->lock); 1607 entry = __lookup_addr_by_id(pernet, addr.addr.id); 1608 if (!entry) { 1609 GENL_SET_ERR_MSG(info, "address not found"); 1610 spin_unlock_bh(&pernet->lock); 1611 return -EINVAL; 1612 } 1613 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { 1614 addr_max = pernet->add_addr_signal_max; 1615 WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1); 1616 } 1617 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1618 addr_max = pernet->local_addr_max; 1619 WRITE_ONCE(pernet->local_addr_max, addr_max - 1); 1620 } 1621 1622 pernet->addrs--; 1623 list_del_rcu(&entry->list); 1624 __clear_bit(entry->addr.id, pernet->id_bitmap); 1625 spin_unlock_bh(&pernet->lock); 1626 1627 mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); 1628 synchronize_rcu(); 1629 __mptcp_pm_release_addr_entry(entry); 1630 1631 return ret; 1632 } 1633 1634 /* Called from the userspace PM only */ 1635 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) 1636 { 1637 struct mptcp_rm_list alist = { .nr = 0 }; 1638 struct mptcp_pm_addr_entry *entry; 1639 int anno_nr = 0; 1640 1641 list_for_each_entry(entry, rm_list, list) { 1642 if (alist.nr >= MPTCP_RM_IDS_MAX) 1643 break; 1644 1645 /* only delete if either announced or matching a subflow */ 1646 if (remove_anno_list_by_saddr(msk, &entry->addr)) 1647 anno_nr++; 1648 else if (!lookup_subflow_by_saddr(&msk->conn_list, 1649 &entry->addr)) 1650 continue; 1651 1652 alist.ids[alist.nr++] = entry->addr.id; 1653 } 1654 1655 if (alist.nr) { 1656 spin_lock_bh(&msk->pm.lock); 1657 msk->pm.add_addr_signaled -= anno_nr; 1658 mptcp_pm_remove_addr(msk, &alist); 1659 spin_unlock_bh(&msk->pm.lock); 1660 } 1661 } 1662 1663 /* Called from the in-kernel PM only */ 1664 static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk, 1665 struct list_head *rm_list) 1666 { 1667 struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; 1668 struct mptcp_pm_addr_entry *entry; 1669 1670 list_for_each_entry(entry, rm_list, list) { 1671 if (slist.nr < MPTCP_RM_IDS_MAX && 1672 lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) 1673 slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1674 1675 if (alist.nr < MPTCP_RM_IDS_MAX && 1676 remove_anno_list_by_saddr(msk, &entry->addr)) 1677 alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1678 } 1679 1680 spin_lock_bh(&msk->pm.lock); 1681 if (alist.nr) { 1682 msk->pm.add_addr_signaled -= alist.nr; 1683 mptcp_pm_remove_addr(msk, &alist); 1684 } 1685 if (slist.nr) 1686 mptcp_pm_nl_rm_subflow_received(msk, &slist); 1687 /* Reset counters: maybe some subflows have been removed before */ 1688 bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1689 msk->pm.local_addr_used = 0; 1690 spin_unlock_bh(&msk->pm.lock); 1691 } 1692 1693 static void 
mptcp_nl_flush_addrs_list(struct net *net, 1694 struct list_head *rm_list) 1695 { 1696 long s_slot = 0, s_num = 0; 1697 struct mptcp_sock *msk; 1698 1699 if (list_empty(rm_list)) 1700 return; 1701 1702 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1703 struct sock *sk = (struct sock *)msk; 1704 1705 if (!mptcp_pm_is_userspace(msk)) { 1706 lock_sock(sk); 1707 mptcp_pm_flush_addrs_and_subflows(msk, rm_list); 1708 release_sock(sk); 1709 } 1710 1711 sock_put(sk); 1712 cond_resched(); 1713 } 1714 } 1715 1716 /* caller must ensure the RCU grace period is already elapsed */ 1717 static void __flush_addrs(struct list_head *list) 1718 { 1719 while (!list_empty(list)) { 1720 struct mptcp_pm_addr_entry *cur; 1721 1722 cur = list_entry(list->next, 1723 struct mptcp_pm_addr_entry, list); 1724 list_del_rcu(&cur->list); 1725 __mptcp_pm_release_addr_entry(cur); 1726 } 1727 } 1728 1729 static void __reset_counters(struct pm_nl_pernet *pernet) 1730 { 1731 WRITE_ONCE(pernet->add_addr_signal_max, 0); 1732 WRITE_ONCE(pernet->add_addr_accept_max, 0); 1733 WRITE_ONCE(pernet->local_addr_max, 0); 1734 pernet->addrs = 0; 1735 } 1736 1737 int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) 1738 { 1739 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1740 LIST_HEAD(free_list); 1741 1742 spin_lock_bh(&pernet->lock); 1743 list_splice_init(&pernet->local_addr_list, &free_list); 1744 __reset_counters(pernet); 1745 pernet->next_id = 1; 1746 bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1747 spin_unlock_bh(&pernet->lock); 1748 mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); 1749 synchronize_rcu(); 1750 __flush_addrs(&free_list); 1751 return 0; 1752 } 1753 1754 int mptcp_nl_fill_addr(struct sk_buff *skb, 1755 struct mptcp_pm_addr_entry *entry) 1756 { 1757 struct mptcp_addr_info *addr = &entry->addr; 1758 struct nlattr *attr; 1759 1760 attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR); 1761 if (!attr) 1762 return -EMSGSIZE; 1763 1764 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family)) 1765 goto nla_put_failure; 1766 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port))) 1767 goto nla_put_failure; 1768 if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id)) 1769 goto nla_put_failure; 1770 if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags)) 1771 goto nla_put_failure; 1772 if (entry->ifindex && 1773 nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex)) 1774 goto nla_put_failure; 1775 1776 if (addr->family == AF_INET && 1777 nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4, 1778 addr->addr.s_addr)) 1779 goto nla_put_failure; 1780 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1781 else if (addr->family == AF_INET6 && 1782 nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6)) 1783 goto nla_put_failure; 1784 #endif 1785 nla_nest_end(skb, attr); 1786 return 0; 1787 1788 nla_put_failure: 1789 nla_nest_cancel(skb, attr); 1790 return -EMSGSIZE; 1791 } 1792 1793 int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info) 1794 { 1795 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1796 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1797 struct mptcp_pm_addr_entry addr, *entry; 1798 struct sk_buff *msg; 1799 void *reply; 1800 int ret; 1801 1802 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 1803 if (ret < 0) 1804 return ret; 1805 1806 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1807 if (!msg) 1808 return -ENOMEM; 1809 1810 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, 1811 
info->genlhdr->cmd); 1812 if (!reply) { 1813 GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); 1814 ret = -EMSGSIZE; 1815 goto fail; 1816 } 1817 1818 spin_lock_bh(&pernet->lock); 1819 entry = __lookup_addr_by_id(pernet, addr.addr.id); 1820 if (!entry) { 1821 GENL_SET_ERR_MSG(info, "address not found"); 1822 ret = -EINVAL; 1823 goto unlock_fail; 1824 } 1825 1826 ret = mptcp_nl_fill_addr(msg, entry); 1827 if (ret) 1828 goto unlock_fail; 1829 1830 genlmsg_end(msg, reply); 1831 ret = genlmsg_reply(msg, info); 1832 spin_unlock_bh(&pernet->lock); 1833 return ret; 1834 1835 unlock_fail: 1836 spin_unlock_bh(&pernet->lock); 1837 1838 fail: 1839 nlmsg_free(msg); 1840 return ret; 1841 } 1842 1843 int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info) 1844 { 1845 return mptcp_pm_get_addr(skb, info); 1846 } 1847 1848 int mptcp_pm_nl_dump_addr(struct sk_buff *msg, 1849 struct netlink_callback *cb) 1850 { 1851 struct net *net = sock_net(msg->sk); 1852 struct mptcp_pm_addr_entry *entry; 1853 struct pm_nl_pernet *pernet; 1854 int id = cb->args[0]; 1855 void *hdr; 1856 int i; 1857 1858 pernet = pm_nl_get_pernet(net); 1859 1860 spin_lock_bh(&pernet->lock); 1861 for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) { 1862 if (test_bit(i, pernet->id_bitmap)) { 1863 entry = __lookup_addr_by_id(pernet, i); 1864 if (!entry) 1865 break; 1866 1867 if (entry->addr.id <= id) 1868 continue; 1869 1870 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, 1871 cb->nlh->nlmsg_seq, &mptcp_genl_family, 1872 NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); 1873 if (!hdr) 1874 break; 1875 1876 if (mptcp_nl_fill_addr(msg, entry) < 0) { 1877 genlmsg_cancel(msg, hdr); 1878 break; 1879 } 1880 1881 id = entry->addr.id; 1882 genlmsg_end(msg, hdr); 1883 } 1884 } 1885 spin_unlock_bh(&pernet->lock); 1886 1887 cb->args[0] = id; 1888 return msg->len; 1889 } 1890 1891 int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg, 1892 struct netlink_callback *cb) 1893 { 1894 return mptcp_pm_dump_addr(msg, cb); 1895 } 1896 1897 static int parse_limit(struct genl_info *info, int id, unsigned int *limit) 1898 { 1899 struct nlattr *attr = info->attrs[id]; 1900 1901 if (!attr) 1902 return 0; 1903 1904 *limit = nla_get_u32(attr); 1905 if (*limit > MPTCP_PM_ADDR_MAX) { 1906 GENL_SET_ERR_MSG(info, "limit greater than maximum"); 1907 return -EINVAL; 1908 } 1909 return 0; 1910 } 1911 1912 int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info) 1913 { 1914 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1915 unsigned int rcv_addrs, subflows; 1916 int ret; 1917 1918 spin_lock_bh(&pernet->lock); 1919 rcv_addrs = pernet->add_addr_accept_max; 1920 ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs); 1921 if (ret) 1922 goto unlock; 1923 1924 subflows = pernet->subflows_max; 1925 ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows); 1926 if (ret) 1927 goto unlock; 1928 1929 WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs); 1930 WRITE_ONCE(pernet->subflows_max, subflows); 1931 1932 unlock: 1933 spin_unlock_bh(&pernet->lock); 1934 return ret; 1935 } 1936 1937 int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info) 1938 { 1939 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1940 struct sk_buff *msg; 1941 void *reply; 1942 1943 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1944 if (!msg) 1945 return -ENOMEM; 1946 1947 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, 1948 MPTCP_PM_CMD_GET_LIMITS); 1949 if (!reply) 1950 goto fail; 1951 1952 if (nla_put_u32(msg, 
MPTCP_PM_ATTR_RCV_ADD_ADDRS, 1953 READ_ONCE(pernet->add_addr_accept_max))) 1954 goto fail; 1955 1956 if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS, 1957 READ_ONCE(pernet->subflows_max))) 1958 goto fail; 1959 1960 genlmsg_end(msg, reply); 1961 return genlmsg_reply(msg, info); 1962 1963 fail: 1964 GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); 1965 nlmsg_free(msg); 1966 return -EMSGSIZE; 1967 } 1968 1969 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, 1970 struct mptcp_addr_info *addr) 1971 { 1972 struct mptcp_rm_list list = { .nr = 0 }; 1973 1974 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1975 1976 spin_lock_bh(&msk->pm.lock); 1977 mptcp_pm_nl_rm_subflow_received(msk, &list); 1978 __mark_subflow_endp_available(msk, list.ids[0]); 1979 mptcp_pm_create_subflow_or_signal_addr(msk); 1980 spin_unlock_bh(&msk->pm.lock); 1981 } 1982 1983 static int mptcp_nl_set_flags(struct net *net, 1984 struct mptcp_addr_info *addr, 1985 u8 bkup, u8 changed) 1986 { 1987 long s_slot = 0, s_num = 0; 1988 struct mptcp_sock *msk; 1989 int ret = -EINVAL; 1990 1991 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1992 struct sock *sk = (struct sock *)msk; 1993 1994 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) 1995 goto next; 1996 1997 lock_sock(sk); 1998 if (changed & MPTCP_PM_ADDR_FLAG_BACKUP) 1999 ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup); 2000 if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) 2001 mptcp_pm_nl_fullmesh(msk, addr); 2002 release_sock(sk); 2003 2004 next: 2005 sock_put(sk); 2006 cond_resched(); 2007 } 2008 2009 return ret; 2010 } 2011 2012 int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info) 2013 { 2014 struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }; 2015 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; 2016 u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | 2017 MPTCP_PM_ADDR_FLAG_FULLMESH; 2018 struct net *net = sock_net(skb->sk); 2019 struct mptcp_pm_addr_entry *entry; 2020 struct pm_nl_pernet *pernet; 2021 u8 lookup_by_id = 0; 2022 u8 bkup = 0; 2023 int ret; 2024 2025 pernet = pm_nl_get_pernet(net); 2026 2027 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 2028 if (ret < 0) 2029 return ret; 2030 2031 if (addr.addr.family == AF_UNSPEC) { 2032 lookup_by_id = 1; 2033 if (!addr.addr.id) { 2034 GENL_SET_ERR_MSG(info, "missing required inputs"); 2035 return -EOPNOTSUPP; 2036 } 2037 } 2038 2039 if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) 2040 bkup = 1; 2041 2042 spin_lock_bh(&pernet->lock); 2043 entry = lookup_by_id ? 
int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
{
	struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
	struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
	u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
			   MPTCP_PM_ADDR_FLAG_FULLMESH;
	struct net *net = sock_net(skb->sk);
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	u8 lookup_by_id = 0;
	u8 bkup = 0;
	int ret;

	pernet = pm_nl_get_pernet(net);

	ret = mptcp_pm_parse_entry(attr, info, false, &addr);
	if (ret < 0)
		return ret;

	if (addr.addr.family == AF_UNSPEC) {
		lookup_by_id = 1;
		if (!addr.addr.id) {
			GENL_SET_ERR_MSG(info, "missing required inputs");
			return -EOPNOTSUPP;
		}
	}

	if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
		bkup = 1;

	spin_lock_bh(&pernet->lock);
	entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
			       __lookup_addr(pernet, &addr.addr);
	if (!entry) {
		spin_unlock_bh(&pernet->lock);
		GENL_SET_ERR_MSG(info, "address not found");
		return -EINVAL;
	}
	if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
	    (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
		spin_unlock_bh(&pernet->lock);
		GENL_SET_ERR_MSG(info, "invalid addr flags");
		return -EINVAL;
	}

	changed = (addr.flags ^ entry->flags) & mask;
	entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
	addr = *entry;
	spin_unlock_bh(&pernet->lock);

	mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
	return 0;
}

int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
	return mptcp_pm_set_flags(skb, info);
}

static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
{
	genlmsg_multicast_netns(&mptcp_genl_family, net,
				nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
}

bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
{
	return genl_has_listeners(&mptcp_genl_family,
				  sock_net((const struct sock *)msk),
				  MPTCP_PM_EV_GRP_OFFSET);
}

static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
{
	const struct inet_sock *issk = inet_sk(ssk);
	const struct mptcp_subflow_context *sf;

	if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
		return -EMSGSIZE;

	switch (ssk->sk_family) {
	case AF_INET:
		if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
			return -EMSGSIZE;
		if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr))
			return -EMSGSIZE;
		break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	case AF_INET6: {
		const struct ipv6_pinfo *np = inet6_sk(ssk);

		if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
			return -EMSGSIZE;
		if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr))
			return -EMSGSIZE;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
		return -EMSGSIZE;
	}

	if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
		return -EMSGSIZE;
	if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport))
		return -EMSGSIZE;

	sf = mptcp_subflow_ctx(ssk);
	if (WARN_ON_ONCE(!sf))
		return -EINVAL;

	if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
		return -EMSGSIZE;

	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
		return -EMSGSIZE;

	return 0;
}
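/* Fill the common per-subflow event attributes: connection token, subflow
 * addresses/ports and ids, backup flag, bound interface and, for an
 * established connection, any pending socket error.
 */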
static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
					 const struct mptcp_sock *msk,
					 const struct sock *ssk)
{
	const struct sock *sk = (const struct sock *)msk;
	const struct mptcp_subflow_context *sf;
	u8 sk_err;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
		return -EMSGSIZE;

	if (mptcp_event_add_subflow(skb, ssk))
		return -EMSGSIZE;

	sf = mptcp_subflow_ctx(ssk);
	if (WARN_ON_ONCE(!sf))
		return -EINVAL;

	if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup))
		return -EMSGSIZE;

	if (ssk->sk_bound_dev_if &&
	    nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
		return -EMSGSIZE;

	sk_err = READ_ONCE(ssk->sk_err);
	if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
	    nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
		return -EMSGSIZE;

	return 0;
}

static int mptcp_event_sub_established(struct sk_buff *skb,
				       const struct mptcp_sock *msk,
				       const struct sock *ssk)
{
	return mptcp_event_put_token_and_ssk(skb, msk, ssk);
}

static int mptcp_event_sub_closed(struct sk_buff *skb,
				  const struct mptcp_sock *msk,
				  const struct sock *ssk)
{
	const struct mptcp_subflow_context *sf;

	if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
		return -EMSGSIZE;

	sf = mptcp_subflow_ctx(ssk);
	if (!sf->reset_seen)
		return 0;

	if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason))
		return -EMSGSIZE;

	if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient))
		return -EMSGSIZE;

	return 0;
}

static int mptcp_event_created(struct sk_buff *skb,
			       const struct mptcp_sock *msk,
			       const struct sock *ssk)
{
	int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));

	if (err)
		return err;

	if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
		return -EMSGSIZE;

	return mptcp_event_add_subflow(skb, ssk);
}

void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
{
	struct net *net = sock_net((const struct sock *)msk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
	if (!nlh)
		goto nla_put_failure;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
		goto nla_put_failure;

	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
		goto nla_put_failure;

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_free(skb);
}
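/* Notify userspace listeners that the peer announced a new address
 * (MPTCP_EVENT_ANNOUNCED), carrying the connection token plus the
 * remote id, port and address.
 */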
void mptcp_event_addr_announced(const struct sock *ssk,
				const struct mptcp_addr_info *info)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct net *net = sock_net(ssk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0,
			  MPTCP_EVENT_ANNOUNCED);
	if (!nlh)
		goto nla_put_failure;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
		goto nla_put_failure;

	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
		goto nla_put_failure;

	if (nla_put_be16(skb, MPTCP_ATTR_DPORT,
			 info->port == 0 ?
			 inet_sk(ssk)->inet_dport :
			 info->port))
		goto nla_put_failure;

	switch (info->family) {
	case AF_INET:
		if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr))
			goto nla_put_failure;
		break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	case AF_INET6:
		if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6))
			goto nla_put_failure;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		goto nla_put_failure;
	}

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_free(skb);
}

void mptcp_event_pm_listener(const struct sock *ssk,
			     enum mptcp_event_type event)
{
	const struct inet_sock *issk = inet_sk(ssk);
	struct net *net = sock_net(ssk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event);
	if (!nlh)
		goto nla_put_failure;

	if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
		goto nla_put_failure;

	if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
		goto nla_put_failure;

	switch (ssk->sk_family) {
	case AF_INET:
		if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
			goto nla_put_failure;
		break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	case AF_INET6: {
		const struct ipv6_pinfo *np = inet6_sk(ssk);

		if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
			goto nla_put_failure;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
		goto nla_put_failure;
	}

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, GFP_KERNEL);
	return;

nla_put_failure:
	nlmsg_free(skb);
}
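/* Generic MPTCP path manager event notification: build and multicast a
 * netlink message of the given type, skipping the work entirely when no
 * userspace listener is subscribed to the events group.
 */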
void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
		 const struct sock *ssk, gfp_t gfp)
{
	struct net *net = sock_net((const struct sock *)msk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
	if (!nlh)
		goto nla_put_failure;

	switch (type) {
	case MPTCP_EVENT_UNSPEC:
		WARN_ON_ONCE(1);
		break;
	case MPTCP_EVENT_CREATED:
	case MPTCP_EVENT_ESTABLISHED:
		if (mptcp_event_created(skb, msk, ssk) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_CLOSED:
		if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_ANNOUNCED:
	case MPTCP_EVENT_REMOVED:
		/* call mptcp_event_addr_announced()/removed instead */
		WARN_ON_ONCE(1);
		break;
	case MPTCP_EVENT_SUB_ESTABLISHED:
	case MPTCP_EVENT_SUB_PRIORITY:
		if (mptcp_event_sub_established(skb, msk, ssk) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_SUB_CLOSED:
		if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_LISTENER_CREATED:
	case MPTCP_EVENT_LISTENER_CLOSED:
		break;
	}

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, gfp);
	return;

nla_put_failure:
	nlmsg_free(skb);
}

struct genl_family mptcp_genl_family __ro_after_init = {
	.name		= MPTCP_PM_NAME,
	.version	= MPTCP_PM_VER,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= mptcp_pm_nl_ops,
	.n_ops		= ARRAY_SIZE(mptcp_pm_nl_ops),
	.resv_start_op	= MPTCP_PM_CMD_SUBFLOW_DESTROY + 1,
	.mcgrps		= mptcp_pm_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(mptcp_pm_mcgrps),
};

static int __net_init pm_nl_init_net(struct net *net)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

	INIT_LIST_HEAD_RCU(&pernet->local_addr_list);

	/* Cit. 2 subflows ought to be enough for anybody. */
	pernet->subflows_max = 2;
	pernet->next_id = 1;
	pernet->stale_loss_cnt = 4;
	spin_lock_init(&pernet->lock);

	/* No need to initialize other pernet fields, the struct is zeroed at
	 * allocation time.
	 */

	return 0;
}

static void __net_exit pm_nl_exit_net(struct list_head *net_list)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list) {
		struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

		/* net is removed from namespace list, can't race with
		 * other modifiers, also netns core already waited for a
		 * RCU grace period.
		 */
		__flush_addrs(&pernet->local_addr_list);
	}
}

static struct pernet_operations mptcp_pm_pernet_ops = {
	.init = pm_nl_init_net,
	.exit_batch = pm_nl_exit_net,
	.id = &pm_nl_pernet_id,
	.size = sizeof(struct pm_nl_pernet),
};

void __init mptcp_pm_nl_init(void)
{
	if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
		panic("Failed to register MPTCP PM pernet subsystem.\n");

	if (genl_register_family(&mptcp_genl_family))
		panic("Failed to register MPTCP PM netlink family\n");
}