1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2020, Red Hat, Inc. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/inet.h> 10 #include <linux/kernel.h> 11 #include <net/inet_common.h> 12 #include <net/netns/generic.h> 13 #include <net/mptcp.h> 14 15 #include "protocol.h" 16 #include "mib.h" 17 #include "mptcp_pm_gen.h" 18 19 static int pm_nl_pernet_id; 20 21 struct mptcp_pm_add_entry { 22 struct list_head list; 23 struct mptcp_addr_info addr; 24 u8 retrans_times; 25 struct timer_list add_timer; 26 struct mptcp_sock *sock; 27 }; 28 29 struct pm_nl_pernet { 30 /* protects pernet updates */ 31 spinlock_t lock; 32 struct list_head local_addr_list; 33 unsigned int addrs; 34 unsigned int stale_loss_cnt; 35 unsigned int add_addr_signal_max; 36 unsigned int add_addr_accept_max; 37 unsigned int local_addr_max; 38 unsigned int subflows_max; 39 unsigned int next_id; 40 DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 41 }; 42 43 #define MPTCP_PM_ADDR_MAX 8 44 #define ADD_ADDR_RETRANS_MAX 3 45 46 static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net) 47 { 48 return net_generic(net, pm_nl_pernet_id); 49 } 50 51 static struct pm_nl_pernet * 52 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk) 53 { 54 return pm_nl_get_pernet(sock_net((struct sock *)msk)); 55 } 56 57 bool mptcp_addresses_equal(const struct mptcp_addr_info *a, 58 const struct mptcp_addr_info *b, bool use_port) 59 { 60 bool addr_equals = false; 61 62 if (a->family == b->family) { 63 if (a->family == AF_INET) 64 addr_equals = a->addr.s_addr == b->addr.s_addr; 65 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 66 else 67 addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6); 68 } else if (a->family == AF_INET) { 69 if (ipv6_addr_v4mapped(&b->addr6)) 70 addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3]; 71 } else if (b->family == AF_INET) { 72 if (ipv6_addr_v4mapped(&a->addr6)) 73 addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr; 74 #endif 75 } 76 77 if (!addr_equals) 78 return false; 79 if (!use_port) 80 return true; 81 82 return a->port == b->port; 83 } 84 85 void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr) 86 { 87 addr->family = skc->skc_family; 88 addr->port = htons(skc->skc_num); 89 if (addr->family == AF_INET) 90 addr->addr.s_addr = skc->skc_rcv_saddr; 91 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 92 else if (addr->family == AF_INET6) 93 addr->addr6 = skc->skc_v6_rcv_saddr; 94 #endif 95 } 96 97 static void remote_address(const struct sock_common *skc, 98 struct mptcp_addr_info *addr) 99 { 100 addr->family = skc->skc_family; 101 addr->port = skc->skc_dport; 102 if (addr->family == AF_INET) 103 addr->addr.s_addr = skc->skc_daddr; 104 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 105 else if (addr->family == AF_INET6) 106 addr->addr6 = skc->skc_v6_daddr; 107 #endif 108 } 109 110 static bool lookup_subflow_by_saddr(const struct list_head *list, 111 const struct mptcp_addr_info *saddr) 112 { 113 struct mptcp_subflow_context *subflow; 114 struct mptcp_addr_info cur; 115 struct sock_common *skc; 116 117 list_for_each_entry(subflow, list, node) { 118 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); 119 120 mptcp_local_address(skc, &cur); 121 if (mptcp_addresses_equal(&cur, saddr, saddr->port)) 122 return true; 123 } 124 125 return false; 126 } 127 128 static bool lookup_subflow_by_daddr(const struct list_head *list, 129 const struct mptcp_addr_info *daddr) 130 { 131 struct mptcp_subflow_context *subflow; 132 struct mptcp_addr_info cur; 133 134 list_for_each_entry(subflow, list, node) { 135 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 136 137 if (!((1 << inet_sk_state_load(ssk)) & 138 (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) 139 continue; 140 141 remote_address((struct sock_common *)ssk, &cur); 142 if (mptcp_addresses_equal(&cur, daddr, daddr->port)) 143 return true; 144 } 145 146 return false; 147 } 148 149 static bool 150 select_local_address(const struct pm_nl_pernet *pernet, 151 const struct mptcp_sock *msk, 152 struct mptcp_pm_addr_entry *new_entry) 153 { 154 struct mptcp_pm_addr_entry *entry; 155 bool found = false; 156 157 msk_owned_by_me(msk); 158 159 rcu_read_lock(); 160 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 161 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) 162 continue; 163 164 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) 165 continue; 166 167 *new_entry = *entry; 168 found = true; 169 break; 170 } 171 rcu_read_unlock(); 172 173 return found; 174 } 175 176 static bool 177 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, 178 struct mptcp_pm_addr_entry *new_entry) 179 { 180 struct mptcp_pm_addr_entry *entry; 181 bool found = false; 182 183 rcu_read_lock(); 184 /* do not keep any additional per socket state, just signal 185 * the address list in order. 186 * Note: removal from the local address list during the msk life-cycle 187 * can lead to additional addresses not being announced. 188 */ 189 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 190 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) 191 continue; 192 193 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) 194 continue; 195 196 *new_entry = *entry; 197 found = true; 198 break; 199 } 200 rcu_read_unlock(); 201 202 return found; 203 } 204 205 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk) 206 { 207 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 208 209 return READ_ONCE(pernet->add_addr_signal_max); 210 } 211 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max); 212 213 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk) 214 { 215 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 216 217 return READ_ONCE(pernet->add_addr_accept_max); 218 } 219 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max); 220 221 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk) 222 { 223 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 224 225 return READ_ONCE(pernet->subflows_max); 226 } 227 EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max); 228 229 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk) 230 { 231 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 232 233 return READ_ONCE(pernet->local_addr_max); 234 } 235 EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max); 236 237 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk) 238 { 239 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 240 241 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) || 242 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap, 243 MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) { 244 WRITE_ONCE(msk->pm.work_pending, false); 245 return false; 246 } 247 return true; 248 } 249 250 struct mptcp_pm_add_entry * 251 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, 252 const struct mptcp_addr_info *addr) 253 { 254 struct mptcp_pm_add_entry *entry; 255 256 lockdep_assert_held(&msk->pm.lock); 257 258 list_for_each_entry(entry, &msk->pm.anno_list, list) { 259 if (mptcp_addresses_equal(&entry->addr, addr, true)) 260 return entry; 261 } 262 263 return NULL; 264 } 265 266 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) 267 { 268 struct mptcp_pm_add_entry *entry; 269 struct mptcp_addr_info saddr; 270 bool ret = false; 271 272 mptcp_local_address((struct sock_common *)sk, &saddr); 273 274 spin_lock_bh(&msk->pm.lock); 275 list_for_each_entry(entry, &msk->pm.anno_list, list) { 276 if (mptcp_addresses_equal(&entry->addr, &saddr, true)) { 277 ret = true; 278 goto out; 279 } 280 } 281 282 out: 283 spin_unlock_bh(&msk->pm.lock); 284 return ret; 285 } 286 287 static void mptcp_pm_add_timer(struct timer_list *timer) 288 { 289 struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); 290 struct mptcp_sock *msk = entry->sock; 291 struct sock *sk = (struct sock *)msk; 292 293 pr_debug("msk=%p\n", msk); 294 295 if (!msk) 296 return; 297 298 if (inet_sk_state_load(sk) == TCP_CLOSE) 299 return; 300 301 if (!entry->addr.id) 302 return; 303 304 if (mptcp_pm_should_add_signal_addr(msk)) { 305 sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8); 306 goto out; 307 } 308 309 spin_lock_bh(&msk->pm.lock); 310 311 if (!mptcp_pm_should_add_signal_addr(msk)) { 312 pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); 313 mptcp_pm_announce_addr(msk, &entry->addr, false); 314 mptcp_pm_add_addr_send_ack(msk); 315 entry->retrans_times++; 316 } 317 318 if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) 319 sk_reset_timer(sk, timer, 320 jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); 321 322 spin_unlock_bh(&msk->pm.lock); 323 324 if (entry->retrans_times == ADD_ADDR_RETRANS_MAX) 325 mptcp_pm_subflow_established(msk); 326 327 out: 328 __sock_put(sk); 329 } 330 331 struct mptcp_pm_add_entry * 332 mptcp_pm_del_add_timer(struct mptcp_sock *msk, 333 const struct mptcp_addr_info *addr, bool check_id) 334 { 335 struct mptcp_pm_add_entry *entry; 336 struct sock *sk = (struct sock *)msk; 337 338 spin_lock_bh(&msk->pm.lock); 339 entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 340 if (entry && (!check_id || entry->addr.id == addr->id)) 341 entry->retrans_times = ADD_ADDR_RETRANS_MAX; 342 spin_unlock_bh(&msk->pm.lock); 343 344 if (entry && (!check_id || entry->addr.id == addr->id)) 345 sk_stop_timer_sync(sk, &entry->add_timer); 346 347 return entry; 348 } 349 350 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, 351 const struct mptcp_addr_info *addr) 352 { 353 struct mptcp_pm_add_entry *add_entry = NULL; 354 struct sock *sk = (struct sock *)msk; 355 struct net *net = sock_net(sk); 356 357 lockdep_assert_held(&msk->pm.lock); 358 359 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); 360 361 if (add_entry) { 362 if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) 363 return false; 364 365 sk_reset_timer(sk, &add_entry->add_timer, 366 jiffies + mptcp_get_add_addr_timeout(net)); 367 return true; 368 } 369 370 add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); 371 if (!add_entry) 372 return false; 373 374 list_add(&add_entry->list, &msk->pm.anno_list); 375 376 add_entry->addr = *addr; 377 add_entry->sock = msk; 378 add_entry->retrans_times = 0; 379 380 timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); 381 sk_reset_timer(sk, &add_entry->add_timer, 382 jiffies + mptcp_get_add_addr_timeout(net)); 383 384 return true; 385 } 386 387 void mptcp_pm_free_anno_list(struct mptcp_sock *msk) 388 { 389 struct mptcp_pm_add_entry *entry, *tmp; 390 struct sock *sk = (struct sock *)msk; 391 LIST_HEAD(free_list); 392 393 pr_debug("msk=%p\n", msk); 394 395 spin_lock_bh(&msk->pm.lock); 396 list_splice_init(&msk->pm.anno_list, &free_list); 397 spin_unlock_bh(&msk->pm.lock); 398 399 list_for_each_entry_safe(entry, tmp, &free_list, list) { 400 sk_stop_timer_sync(sk, &entry->add_timer); 401 kfree(entry); 402 } 403 } 404 405 /* Fill all the remote addresses into the array addrs[], 406 * and return the array size. 407 */ 408 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, 409 struct mptcp_addr_info *local, 410 bool fullmesh, 411 struct mptcp_addr_info *addrs) 412 { 413 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); 414 struct sock *sk = (struct sock *)msk, *ssk; 415 struct mptcp_subflow_context *subflow; 416 struct mptcp_addr_info remote = { 0 }; 417 unsigned int subflows_max; 418 int i = 0; 419 420 subflows_max = mptcp_pm_get_subflows_max(msk); 421 remote_address((struct sock_common *)sk, &remote); 422 423 /* Non-fullmesh endpoint, fill in the single entry 424 * corresponding to the primary MPC subflow remote address 425 */ 426 if (!fullmesh) { 427 if (deny_id0) 428 return 0; 429 430 if (!mptcp_pm_addr_families_match(sk, local, &remote)) 431 return 0; 432 433 msk->pm.subflows++; 434 addrs[i++] = remote; 435 } else { 436 DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 437 438 /* Forbid creation of new subflows matching existing 439 * ones, possibly already created by incoming ADD_ADDR 440 */ 441 bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 442 mptcp_for_each_subflow(msk, subflow) 443 if (READ_ONCE(subflow->local_id) == local->id) 444 __set_bit(subflow->remote_id, unavail_id); 445 446 mptcp_for_each_subflow(msk, subflow) { 447 ssk = mptcp_subflow_tcp_sock(subflow); 448 remote_address((struct sock_common *)ssk, &addrs[i]); 449 addrs[i].id = READ_ONCE(subflow->remote_id); 450 if (deny_id0 && !addrs[i].id) 451 continue; 452 453 if (test_bit(addrs[i].id, unavail_id)) 454 continue; 455 456 if (!mptcp_pm_addr_families_match(sk, local, &addrs[i])) 457 continue; 458 459 if (msk->pm.subflows < subflows_max) { 460 /* forbid creating multiple address towards 461 * this id 462 */ 463 __set_bit(addrs[i].id, unavail_id); 464 msk->pm.subflows++; 465 i++; 466 } 467 } 468 } 469 470 return i; 471 } 472 473 static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 474 bool prio, bool backup) 475 { 476 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 477 bool slow; 478 479 pr_debug("send ack for %s\n", 480 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr")); 481 482 slow = lock_sock_fast(ssk); 483 if (prio) { 484 subflow->send_mp_prio = 1; 485 subflow->request_bkup = backup; 486 } 487 488 __mptcp_subflow_send_ack(ssk); 489 unlock_sock_fast(ssk, slow); 490 } 491 492 static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 493 bool prio, bool backup) 494 { 495 spin_unlock_bh(&msk->pm.lock); 496 __mptcp_pm_send_ack(msk, subflow, prio, backup); 497 spin_lock_bh(&msk->pm.lock); 498 } 499 500 static struct mptcp_pm_addr_entry * 501 __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) 502 { 503 struct mptcp_pm_addr_entry *entry; 504 505 list_for_each_entry(entry, &pernet->local_addr_list, list) { 506 if (entry->addr.id == id) 507 return entry; 508 } 509 return NULL; 510 } 511 512 static struct mptcp_pm_addr_entry * 513 __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) 514 { 515 struct mptcp_pm_addr_entry *entry; 516 517 list_for_each_entry(entry, &pernet->local_addr_list, list) { 518 if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) 519 return entry; 520 } 521 return NULL; 522 } 523 524 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) 525 { 526 struct sock *sk = (struct sock *)msk; 527 struct mptcp_pm_addr_entry local; 528 unsigned int add_addr_signal_max; 529 bool signal_and_subflow = false; 530 unsigned int local_addr_max; 531 struct pm_nl_pernet *pernet; 532 unsigned int subflows_max; 533 534 pernet = pm_nl_get_pernet(sock_net(sk)); 535 536 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk); 537 local_addr_max = mptcp_pm_get_local_addr_max(msk); 538 subflows_max = mptcp_pm_get_subflows_max(msk); 539 540 /* do lazy endpoint usage accounting for the MPC subflows */ 541 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) { 542 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first); 543 struct mptcp_pm_addr_entry *entry; 544 struct mptcp_addr_info mpc_addr; 545 bool backup = false; 546 547 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); 548 rcu_read_lock(); 549 entry = __lookup_addr(pernet, &mpc_addr); 550 if (entry) { 551 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); 552 msk->mpc_endpoint_id = entry->addr.id; 553 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 554 } 555 rcu_read_unlock(); 556 557 if (backup) 558 mptcp_pm_send_ack(msk, subflow, true, backup); 559 560 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); 561 } 562 563 pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", 564 msk->pm.local_addr_used, local_addr_max, 565 msk->pm.add_addr_signaled, add_addr_signal_max, 566 msk->pm.subflows, subflows_max); 567 568 /* check first for announce */ 569 if (msk->pm.add_addr_signaled < add_addr_signal_max) { 570 /* due to racing events on both ends we can reach here while 571 * previous add address is still running: if we invoke now 572 * mptcp_pm_announce_addr(), that will fail and the 573 * corresponding id will be marked as used. 574 * Instead let the PM machinery reschedule us when the 575 * current address announce will be completed. 576 */ 577 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) 578 return; 579 580 if (!select_signal_address(pernet, msk, &local)) 581 goto subflow; 582 583 /* If the alloc fails, we are on memory pressure, not worth 584 * continuing, and trying to create subflows. 585 */ 586 if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) 587 return; 588 589 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 590 msk->pm.add_addr_signaled++; 591 592 /* Special case for ID0: set the correct ID */ 593 if (local.addr.id == msk->mpc_endpoint_id) 594 local.addr.id = 0; 595 596 mptcp_pm_announce_addr(msk, &local.addr, false); 597 mptcp_pm_nl_addr_send_ack(msk); 598 599 if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) 600 signal_and_subflow = true; 601 } 602 603 subflow: 604 /* check if should create a new subflow */ 605 while (msk->pm.local_addr_used < local_addr_max && 606 msk->pm.subflows < subflows_max) { 607 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; 608 bool fullmesh; 609 int i, nr; 610 611 if (signal_and_subflow) 612 signal_and_subflow = false; 613 else if (!select_local_address(pernet, msk, &local)) 614 break; 615 616 fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); 617 618 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 619 620 /* Special case for ID0: set the correct ID */ 621 if (local.addr.id == msk->mpc_endpoint_id) 622 local.addr.id = 0; 623 else /* local_addr_used is not decr for ID 0 */ 624 msk->pm.local_addr_used++; 625 626 nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); 627 if (nr == 0) 628 continue; 629 630 spin_unlock_bh(&msk->pm.lock); 631 for (i = 0; i < nr; i++) 632 __mptcp_subflow_connect(sk, &local.addr, &addrs[i]); 633 spin_lock_bh(&msk->pm.lock); 634 } 635 mptcp_pm_nl_check_work_pending(msk); 636 } 637 638 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk) 639 { 640 mptcp_pm_create_subflow_or_signal_addr(msk); 641 } 642 643 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) 644 { 645 mptcp_pm_create_subflow_or_signal_addr(msk); 646 } 647 648 /* Fill all the local addresses into the array addrs[], 649 * and return the array size. 650 */ 651 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, 652 struct mptcp_addr_info *remote, 653 struct mptcp_addr_info *addrs) 654 { 655 struct sock *sk = (struct sock *)msk; 656 struct mptcp_pm_addr_entry *entry; 657 struct mptcp_addr_info mpc_addr; 658 struct pm_nl_pernet *pernet; 659 unsigned int subflows_max; 660 int i = 0; 661 662 pernet = pm_nl_get_pernet_from_msk(msk); 663 subflows_max = mptcp_pm_get_subflows_max(msk); 664 665 mptcp_local_address((struct sock_common *)msk, &mpc_addr); 666 667 rcu_read_lock(); 668 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 669 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) 670 continue; 671 672 if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) 673 continue; 674 675 if (msk->pm.subflows < subflows_max) { 676 msk->pm.subflows++; 677 addrs[i] = entry->addr; 678 679 /* Special case for ID0: set the correct ID */ 680 if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port)) 681 addrs[i].id = 0; 682 683 i++; 684 } 685 } 686 rcu_read_unlock(); 687 688 /* If the array is empty, fill in the single 689 * 'IPADDRANY' local address 690 */ 691 if (!i) { 692 struct mptcp_addr_info local; 693 694 memset(&local, 0, sizeof(local)); 695 local.family = 696 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 697 remote->family == AF_INET6 && 698 ipv6_addr_v4mapped(&remote->addr6) ? AF_INET : 699 #endif 700 remote->family; 701 702 if (!mptcp_pm_addr_families_match(sk, &local, remote)) 703 return 0; 704 705 msk->pm.subflows++; 706 addrs[i++] = local; 707 } 708 709 return i; 710 } 711 712 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) 713 { 714 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; 715 struct sock *sk = (struct sock *)msk; 716 unsigned int add_addr_accept_max; 717 struct mptcp_addr_info remote; 718 unsigned int subflows_max; 719 bool sf_created = false; 720 int i, nr; 721 722 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); 723 subflows_max = mptcp_pm_get_subflows_max(msk); 724 725 pr_debug("accepted %d:%d remote family %d\n", 726 msk->pm.add_addr_accepted, add_addr_accept_max, 727 msk->pm.remote.family); 728 729 remote = msk->pm.remote; 730 mptcp_pm_announce_addr(msk, &remote, true); 731 mptcp_pm_nl_addr_send_ack(msk); 732 733 if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) 734 return; 735 736 /* pick id 0 port, if none is provided the remote address */ 737 if (!remote.port) 738 remote.port = sk->sk_dport; 739 740 /* connect to the specified remote address, using whatever 741 * local address the routing configuration will pick. 742 */ 743 nr = fill_local_addresses_vec(msk, &remote, addrs); 744 if (nr == 0) 745 return; 746 747 spin_unlock_bh(&msk->pm.lock); 748 for (i = 0; i < nr; i++) 749 if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0) 750 sf_created = true; 751 spin_lock_bh(&msk->pm.lock); 752 753 if (sf_created) { 754 /* add_addr_accepted is not decr for ID 0 */ 755 if (remote.id) 756 msk->pm.add_addr_accepted++; 757 if (msk->pm.add_addr_accepted >= add_addr_accept_max || 758 msk->pm.subflows >= subflows_max) 759 WRITE_ONCE(msk->pm.accept_addr, false); 760 } 761 } 762 763 bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, 764 const struct mptcp_addr_info *remote) 765 { 766 struct mptcp_addr_info mpc_remote; 767 768 remote_address((struct sock_common *)msk, &mpc_remote); 769 return mptcp_addresses_equal(&mpc_remote, remote, remote->port); 770 } 771 772 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) 773 { 774 struct mptcp_subflow_context *subflow; 775 776 msk_owned_by_me(msk); 777 lockdep_assert_held(&msk->pm.lock); 778 779 if (!mptcp_pm_should_add_signal(msk) && 780 !mptcp_pm_should_rm_signal(msk)) 781 return; 782 783 mptcp_for_each_subflow(msk, subflow) { 784 if (__mptcp_subflow_active(subflow)) { 785 mptcp_pm_send_ack(msk, subflow, false, false); 786 break; 787 } 788 } 789 } 790 791 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, 792 struct mptcp_addr_info *addr, 793 struct mptcp_addr_info *rem, 794 u8 bkup) 795 { 796 struct mptcp_subflow_context *subflow; 797 798 pr_debug("bkup=%d\n", bkup); 799 800 mptcp_for_each_subflow(msk, subflow) { 801 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 802 struct mptcp_addr_info local, remote; 803 804 mptcp_local_address((struct sock_common *)ssk, &local); 805 if (!mptcp_addresses_equal(&local, addr, addr->port)) 806 continue; 807 808 if (rem && rem->family != AF_UNSPEC) { 809 remote_address((struct sock_common *)ssk, &remote); 810 if (!mptcp_addresses_equal(&remote, rem, rem->port)) 811 continue; 812 } 813 814 __mptcp_pm_send_ack(msk, subflow, true, bkup); 815 return 0; 816 } 817 818 return -EINVAL; 819 } 820 821 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, 822 const struct mptcp_rm_list *rm_list, 823 enum linux_mptcp_mib_field rm_type) 824 { 825 struct mptcp_subflow_context *subflow, *tmp; 826 struct sock *sk = (struct sock *)msk; 827 u8 i; 828 829 pr_debug("%s rm_list_nr %d\n", 830 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr); 831 832 msk_owned_by_me(msk); 833 834 if (sk->sk_state == TCP_LISTEN) 835 return; 836 837 if (!rm_list->nr) 838 return; 839 840 if (list_empty(&msk->conn_list)) 841 return; 842 843 for (i = 0; i < rm_list->nr; i++) { 844 u8 rm_id = rm_list->ids[i]; 845 bool removed = false; 846 847 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 848 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 849 u8 remote_id = READ_ONCE(subflow->remote_id); 850 int how = RCV_SHUTDOWN | SEND_SHUTDOWN; 851 u8 id = subflow_get_local_id(subflow); 852 853 if (inet_sk_state_load(ssk) == TCP_CLOSE) 854 continue; 855 if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) 856 continue; 857 if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) 858 continue; 859 860 pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", 861 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", 862 i, rm_id, id, remote_id, msk->mpc_endpoint_id); 863 spin_unlock_bh(&msk->pm.lock); 864 mptcp_subflow_shutdown(sk, ssk, how); 865 866 /* the following takes care of updating the subflows counter */ 867 mptcp_close_ssk(sk, ssk, subflow); 868 spin_lock_bh(&msk->pm.lock); 869 870 removed |= subflow->request_join; 871 if (rm_type == MPTCP_MIB_RMSUBFLOW) 872 __MPTCP_INC_STATS(sock_net(sk), rm_type); 873 } 874 875 if (rm_type == MPTCP_MIB_RMADDR) 876 __MPTCP_INC_STATS(sock_net(sk), rm_type); 877 878 if (!removed) 879 continue; 880 881 if (!mptcp_pm_is_kernel(msk)) 882 continue; 883 884 if (rm_type == MPTCP_MIB_RMADDR && rm_id && 885 !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { 886 /* Note: if the subflow has been closed before, this 887 * add_addr_accepted counter will not be decremented. 888 */ 889 if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) 890 WRITE_ONCE(msk->pm.accept_addr, true); 891 } 892 } 893 } 894 895 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) 896 { 897 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); 898 } 899 900 static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, 901 const struct mptcp_rm_list *rm_list) 902 { 903 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); 904 } 905 906 void mptcp_pm_nl_work(struct mptcp_sock *msk) 907 { 908 struct mptcp_pm_data *pm = &msk->pm; 909 910 msk_owned_by_me(msk); 911 912 if (!(pm->status & MPTCP_PM_WORK_MASK)) 913 return; 914 915 spin_lock_bh(&msk->pm.lock); 916 917 pr_debug("msk=%p status=%x\n", msk, pm->status); 918 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { 919 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); 920 mptcp_pm_nl_add_addr_received(msk); 921 } 922 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { 923 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); 924 mptcp_pm_nl_addr_send_ack(msk); 925 } 926 if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { 927 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); 928 mptcp_pm_nl_rm_addr_received(msk); 929 } 930 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { 931 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); 932 mptcp_pm_nl_fully_established(msk); 933 } 934 if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { 935 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); 936 mptcp_pm_nl_subflow_established(msk); 937 } 938 939 spin_unlock_bh(&msk->pm.lock); 940 } 941 942 static bool address_use_port(struct mptcp_pm_addr_entry *entry) 943 { 944 return (entry->flags & 945 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == 946 MPTCP_PM_ADDR_FLAG_SIGNAL; 947 } 948 949 /* caller must ensure the RCU grace period is already elapsed */ 950 static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) 951 { 952 if (entry->lsk) 953 sock_release(entry->lsk); 954 kfree(entry); 955 } 956 957 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, 958 struct mptcp_pm_addr_entry *entry, 959 bool needs_id) 960 { 961 struct mptcp_pm_addr_entry *cur, *del_entry = NULL; 962 unsigned int addr_max; 963 int ret = -EINVAL; 964 965 spin_lock_bh(&pernet->lock); 966 /* to keep the code simple, don't do IDR-like allocation for address ID, 967 * just bail when we exceed limits 968 */ 969 if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID) 970 pernet->next_id = 1; 971 if (pernet->addrs >= MPTCP_PM_ADDR_MAX) { 972 ret = -ERANGE; 973 goto out; 974 } 975 if (test_bit(entry->addr.id, pernet->id_bitmap)) { 976 ret = -EBUSY; 977 goto out; 978 } 979 980 /* do not insert duplicate address, differentiate on port only 981 * singled addresses 982 */ 983 if (!address_use_port(entry)) 984 entry->addr.port = 0; 985 list_for_each_entry(cur, &pernet->local_addr_list, list) { 986 if (mptcp_addresses_equal(&cur->addr, &entry->addr, 987 cur->addr.port || entry->addr.port)) { 988 /* allow replacing the exiting endpoint only if such 989 * endpoint is an implicit one and the user-space 990 * did not provide an endpoint id 991 */ 992 if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) { 993 ret = -EEXIST; 994 goto out; 995 } 996 if (entry->addr.id) 997 goto out; 998 999 pernet->addrs--; 1000 entry->addr.id = cur->addr.id; 1001 list_del_rcu(&cur->list); 1002 del_entry = cur; 1003 break; 1004 } 1005 } 1006 1007 if (!entry->addr.id && needs_id) { 1008 find_next: 1009 entry->addr.id = find_next_zero_bit(pernet->id_bitmap, 1010 MPTCP_PM_MAX_ADDR_ID + 1, 1011 pernet->next_id); 1012 if (!entry->addr.id && pernet->next_id != 1) { 1013 pernet->next_id = 1; 1014 goto find_next; 1015 } 1016 } 1017 1018 if (!entry->addr.id && needs_id) 1019 goto out; 1020 1021 __set_bit(entry->addr.id, pernet->id_bitmap); 1022 if (entry->addr.id > pernet->next_id) 1023 pernet->next_id = entry->addr.id; 1024 1025 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { 1026 addr_max = pernet->add_addr_signal_max; 1027 WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1); 1028 } 1029 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1030 addr_max = pernet->local_addr_max; 1031 WRITE_ONCE(pernet->local_addr_max, addr_max + 1); 1032 } 1033 1034 pernet->addrs++; 1035 if (!entry->addr.port) 1036 list_add_tail_rcu(&entry->list, &pernet->local_addr_list); 1037 else 1038 list_add_rcu(&entry->list, &pernet->local_addr_list); 1039 ret = entry->addr.id; 1040 1041 out: 1042 spin_unlock_bh(&pernet->lock); 1043 1044 /* just replaced an existing entry, free it */ 1045 if (del_entry) { 1046 synchronize_rcu(); 1047 __mptcp_pm_release_addr_entry(del_entry); 1048 } 1049 return ret; 1050 } 1051 1052 static struct lock_class_key mptcp_slock_keys[2]; 1053 static struct lock_class_key mptcp_keys[2]; 1054 1055 static int mptcp_pm_nl_create_listen_socket(struct sock *sk, 1056 struct mptcp_pm_addr_entry *entry) 1057 { 1058 bool is_ipv6 = sk->sk_family == AF_INET6; 1059 int addrlen = sizeof(struct sockaddr_in); 1060 struct sockaddr_storage addr; 1061 struct sock *newsk, *ssk; 1062 int backlog = 1024; 1063 int err; 1064 1065 err = sock_create_kern(sock_net(sk), entry->addr.family, 1066 SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk); 1067 if (err) 1068 return err; 1069 1070 newsk = entry->lsk->sk; 1071 if (!newsk) 1072 return -EINVAL; 1073 1074 /* The subflow socket lock is acquired in a nested to the msk one 1075 * in several places, even by the TCP stack, and this msk is a kernel 1076 * socket: lockdep complains. Instead of propagating the _nested 1077 * modifiers in several places, re-init the lock class for the msk 1078 * socket to an mptcp specific one. 1079 */ 1080 sock_lock_init_class_and_name(newsk, 1081 is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", 1082 &mptcp_slock_keys[is_ipv6], 1083 is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET", 1084 &mptcp_keys[is_ipv6]); 1085 1086 lock_sock(newsk); 1087 ssk = __mptcp_nmpc_sk(mptcp_sk(newsk)); 1088 release_sock(newsk); 1089 if (IS_ERR(ssk)) 1090 return PTR_ERR(ssk); 1091 1092 mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family); 1093 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1094 if (entry->addr.family == AF_INET6) 1095 addrlen = sizeof(struct sockaddr_in6); 1096 #endif 1097 if (ssk->sk_family == AF_INET) 1098 err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); 1099 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1100 else if (ssk->sk_family == AF_INET6) 1101 err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); 1102 #endif 1103 if (err) 1104 return err; 1105 1106 /* We don't use mptcp_set_state() here because it needs to be called 1107 * under the msk socket lock. For the moment, that will not bring 1108 * anything more than only calling inet_sk_state_store(), because the 1109 * old status is known (TCP_CLOSE). 1110 */ 1111 inet_sk_state_store(newsk, TCP_LISTEN); 1112 lock_sock(ssk); 1113 err = __inet_listen_sk(ssk, backlog); 1114 if (!err) 1115 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); 1116 release_sock(ssk); 1117 return err; 1118 } 1119 1120 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc) 1121 { 1122 struct mptcp_pm_addr_entry *entry; 1123 struct pm_nl_pernet *pernet; 1124 int ret = -1; 1125 1126 pernet = pm_nl_get_pernet_from_msk(msk); 1127 1128 rcu_read_lock(); 1129 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 1130 if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { 1131 ret = entry->addr.id; 1132 break; 1133 } 1134 } 1135 rcu_read_unlock(); 1136 if (ret >= 0) 1137 return ret; 1138 1139 /* address not found, add to local list */ 1140 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 1141 if (!entry) 1142 return -ENOMEM; 1143 1144 entry->addr = *skc; 1145 entry->addr.id = 0; 1146 entry->addr.port = 0; 1147 entry->ifindex = 0; 1148 entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; 1149 entry->lsk = NULL; 1150 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); 1151 if (ret < 0) 1152 kfree(entry); 1153 1154 return ret; 1155 } 1156 1157 bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) 1158 { 1159 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 1160 struct mptcp_pm_addr_entry *entry; 1161 bool backup = false; 1162 1163 rcu_read_lock(); 1164 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { 1165 if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { 1166 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1167 break; 1168 } 1169 } 1170 rcu_read_unlock(); 1171 1172 return backup; 1173 } 1174 1175 #define MPTCP_PM_CMD_GRP_OFFSET 0 1176 #define MPTCP_PM_EV_GRP_OFFSET 1 1177 1178 static const struct genl_multicast_group mptcp_pm_mcgrps[] = { 1179 [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, }, 1180 [MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME, 1181 .flags = GENL_MCAST_CAP_NET_ADMIN, 1182 }, 1183 }; 1184 1185 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) 1186 { 1187 struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk); 1188 struct sock *sk = (struct sock *)msk; 1189 unsigned int active_max_loss_cnt; 1190 struct net *net = sock_net(sk); 1191 unsigned int stale_loss_cnt; 1192 bool slow; 1193 1194 stale_loss_cnt = mptcp_stale_loss_cnt(net); 1195 if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt) 1196 return; 1197 1198 /* look for another available subflow not in loss state */ 1199 active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1); 1200 mptcp_for_each_subflow(msk, iter) { 1201 if (iter != subflow && mptcp_subflow_active(iter) && 1202 iter->stale_count < active_max_loss_cnt) { 1203 /* we have some alternatives, try to mark this subflow as idle ...*/ 1204 slow = lock_sock_fast(ssk); 1205 if (!tcp_rtx_and_write_queues_empty(ssk)) { 1206 subflow->stale = 1; 1207 __mptcp_retransmit_pending_data(sk); 1208 MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE); 1209 } 1210 unlock_sock_fast(ssk, slow); 1211 1212 /* always try to push the pending data regardless of re-injections: 1213 * we can possibly use backup subflows now, and subflow selection 1214 * is cheap under the msk socket lock 1215 */ 1216 __mptcp_push_pending(sk, 0); 1217 return; 1218 } 1219 } 1220 } 1221 1222 static int mptcp_pm_family_to_addr(int family) 1223 { 1224 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1225 if (family == AF_INET6) 1226 return MPTCP_PM_ADDR_ATTR_ADDR6; 1227 #endif 1228 return MPTCP_PM_ADDR_ATTR_ADDR4; 1229 } 1230 1231 static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[], 1232 const struct nlattr *attr, 1233 struct genl_info *info, 1234 struct mptcp_addr_info *addr, 1235 bool require_family) 1236 { 1237 int err, addr_addr; 1238 1239 if (!attr) { 1240 GENL_SET_ERR_MSG(info, "missing address info"); 1241 return -EINVAL; 1242 } 1243 1244 /* no validation needed - was already done via nested policy */ 1245 err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, 1246 mptcp_pm_address_nl_policy, info->extack); 1247 if (err) 1248 return err; 1249 1250 if (tb[MPTCP_PM_ADDR_ATTR_ID]) 1251 addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]); 1252 1253 if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) { 1254 if (!require_family) 1255 return 0; 1256 1257 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1258 "missing family"); 1259 return -EINVAL; 1260 } 1261 1262 addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]); 1263 if (addr->family != AF_INET 1264 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1265 && addr->family != AF_INET6 1266 #endif 1267 ) { 1268 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1269 "unknown address family"); 1270 return -EINVAL; 1271 } 1272 addr_addr = mptcp_pm_family_to_addr(addr->family); 1273 if (!tb[addr_addr]) { 1274 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1275 "missing address data"); 1276 return -EINVAL; 1277 } 1278 1279 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1280 if (addr->family == AF_INET6) 1281 addr->addr6 = nla_get_in6_addr(tb[addr_addr]); 1282 else 1283 #endif 1284 addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]); 1285 1286 if (tb[MPTCP_PM_ADDR_ATTR_PORT]) 1287 addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); 1288 1289 return 0; 1290 } 1291 1292 int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, 1293 struct mptcp_addr_info *addr) 1294 { 1295 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 1296 1297 memset(addr, 0, sizeof(*addr)); 1298 1299 return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true); 1300 } 1301 1302 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, 1303 bool require_family, 1304 struct mptcp_pm_addr_entry *entry) 1305 { 1306 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 1307 int err; 1308 1309 memset(entry, 0, sizeof(*entry)); 1310 1311 err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family); 1312 if (err) 1313 return err; 1314 1315 if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) { 1316 u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]); 1317 1318 entry->ifindex = val; 1319 } 1320 1321 if (tb[MPTCP_PM_ADDR_ATTR_FLAGS]) 1322 entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]); 1323 1324 if (tb[MPTCP_PM_ADDR_ATTR_PORT]) 1325 entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); 1326 1327 return 0; 1328 } 1329 1330 static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) 1331 { 1332 return pm_nl_get_pernet(genl_info_net(info)); 1333 } 1334 1335 static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, 1336 struct mptcp_addr_info *addr) 1337 { 1338 struct mptcp_sock *msk; 1339 long s_slot = 0, s_num = 0; 1340 1341 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1342 struct sock *sk = (struct sock *)msk; 1343 struct mptcp_addr_info mpc_addr; 1344 1345 if (!READ_ONCE(msk->fully_established) || 1346 mptcp_pm_is_userspace(msk)) 1347 goto next; 1348 1349 /* if the endp linked to the init sf is re-added with a != ID */ 1350 mptcp_local_address((struct sock_common *)msk, &mpc_addr); 1351 1352 lock_sock(sk); 1353 spin_lock_bh(&msk->pm.lock); 1354 if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) 1355 msk->mpc_endpoint_id = addr->id; 1356 mptcp_pm_create_subflow_or_signal_addr(msk); 1357 spin_unlock_bh(&msk->pm.lock); 1358 release_sock(sk); 1359 1360 next: 1361 sock_put(sk); 1362 cond_resched(); 1363 } 1364 1365 return 0; 1366 } 1367 1368 static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, 1369 struct genl_info *info) 1370 { 1371 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 1372 1373 if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, 1374 mptcp_pm_address_nl_policy, info->extack) && 1375 tb[MPTCP_PM_ADDR_ATTR_ID]) 1376 return true; 1377 return false; 1378 } 1379 1380 int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) 1381 { 1382 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1383 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1384 struct mptcp_pm_addr_entry addr, *entry; 1385 int ret; 1386 1387 ret = mptcp_pm_parse_entry(attr, info, true, &addr); 1388 if (ret < 0) 1389 return ret; 1390 1391 if (addr.addr.port && !address_use_port(&addr)) { 1392 GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port"); 1393 return -EINVAL; 1394 } 1395 1396 if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL && 1397 addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { 1398 GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh"); 1399 return -EINVAL; 1400 } 1401 1402 if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { 1403 GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint"); 1404 return -EINVAL; 1405 } 1406 1407 entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT); 1408 if (!entry) { 1409 GENL_SET_ERR_MSG(info, "can't allocate addr"); 1410 return -ENOMEM; 1411 } 1412 1413 *entry = addr; 1414 if (entry->addr.port) { 1415 ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry); 1416 if (ret) { 1417 GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret); 1418 goto out_free; 1419 } 1420 } 1421 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, 1422 !mptcp_pm_has_addr_attr_id(attr, info)); 1423 if (ret < 0) { 1424 GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); 1425 goto out_free; 1426 } 1427 1428 mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); 1429 return 0; 1430 1431 out_free: 1432 __mptcp_pm_release_addr_entry(entry); 1433 return ret; 1434 } 1435 1436 int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, 1437 u8 *flags, int *ifindex) 1438 { 1439 struct mptcp_pm_addr_entry *entry; 1440 struct sock *sk = (struct sock *)msk; 1441 struct net *net = sock_net(sk); 1442 1443 /* No entries with ID 0 */ 1444 if (id == 0) 1445 return 0; 1446 1447 rcu_read_lock(); 1448 entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id); 1449 if (entry) { 1450 *flags = entry->flags; 1451 *ifindex = entry->ifindex; 1452 } 1453 rcu_read_unlock(); 1454 1455 return 0; 1456 } 1457 1458 static bool remove_anno_list_by_saddr(struct mptcp_sock *msk, 1459 const struct mptcp_addr_info *addr) 1460 { 1461 struct mptcp_pm_add_entry *entry; 1462 1463 entry = mptcp_pm_del_add_timer(msk, addr, false); 1464 if (entry) { 1465 list_del(&entry->list); 1466 kfree(entry); 1467 return true; 1468 } 1469 1470 return false; 1471 } 1472 1473 static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, 1474 const struct mptcp_addr_info *addr) 1475 { 1476 return msk->mpc_endpoint_id == addr->id ? 0 : addr->id; 1477 } 1478 1479 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, 1480 const struct mptcp_addr_info *addr, 1481 bool force) 1482 { 1483 struct mptcp_rm_list list = { .nr = 0 }; 1484 bool ret; 1485 1486 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1487 1488 ret = remove_anno_list_by_saddr(msk, addr); 1489 if (ret || force) { 1490 spin_lock_bh(&msk->pm.lock); 1491 if (ret) { 1492 __set_bit(addr->id, msk->pm.id_avail_bitmap); 1493 msk->pm.add_addr_signaled--; 1494 } 1495 mptcp_pm_remove_addr(msk, &list); 1496 spin_unlock_bh(&msk->pm.lock); 1497 } 1498 return ret; 1499 } 1500 1501 static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) 1502 { 1503 /* If it was marked as used, and not ID 0, decrement local_addr_used */ 1504 if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && 1505 id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) 1506 msk->pm.local_addr_used--; 1507 } 1508 1509 static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, 1510 const struct mptcp_pm_addr_entry *entry) 1511 { 1512 const struct mptcp_addr_info *addr = &entry->addr; 1513 struct mptcp_rm_list list = { .nr = 1 }; 1514 long s_slot = 0, s_num = 0; 1515 struct mptcp_sock *msk; 1516 1517 pr_debug("remove_id=%d\n", addr->id); 1518 1519 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1520 struct sock *sk = (struct sock *)msk; 1521 bool remove_subflow; 1522 1523 if (mptcp_pm_is_userspace(msk)) 1524 goto next; 1525 1526 if (list_empty(&msk->conn_list)) { 1527 mptcp_pm_remove_anno_addr(msk, addr, false); 1528 goto next; 1529 } 1530 1531 lock_sock(sk); 1532 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); 1533 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && 1534 !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); 1535 1536 list.ids[0] = mptcp_endp_get_local_id(msk, addr); 1537 if (remove_subflow) { 1538 spin_lock_bh(&msk->pm.lock); 1539 mptcp_pm_nl_rm_subflow_received(msk, &list); 1540 spin_unlock_bh(&msk->pm.lock); 1541 } 1542 1543 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1544 spin_lock_bh(&msk->pm.lock); 1545 __mark_subflow_endp_available(msk, list.ids[0]); 1546 spin_unlock_bh(&msk->pm.lock); 1547 } 1548 1549 if (msk->mpc_endpoint_id == entry->addr.id) 1550 msk->mpc_endpoint_id = 0; 1551 release_sock(sk); 1552 1553 next: 1554 sock_put(sk); 1555 cond_resched(); 1556 } 1557 1558 return 0; 1559 } 1560 1561 static int mptcp_nl_remove_id_zero_address(struct net *net, 1562 struct mptcp_addr_info *addr) 1563 { 1564 struct mptcp_rm_list list = { .nr = 0 }; 1565 long s_slot = 0, s_num = 0; 1566 struct mptcp_sock *msk; 1567 1568 list.ids[list.nr++] = 0; 1569 1570 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1571 struct sock *sk = (struct sock *)msk; 1572 struct mptcp_addr_info msk_local; 1573 1574 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) 1575 goto next; 1576 1577 mptcp_local_address((struct sock_common *)msk, &msk_local); 1578 if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) 1579 goto next; 1580 1581 lock_sock(sk); 1582 spin_lock_bh(&msk->pm.lock); 1583 mptcp_pm_remove_addr(msk, &list); 1584 mptcp_pm_nl_rm_subflow_received(msk, &list); 1585 __mark_subflow_endp_available(msk, 0); 1586 spin_unlock_bh(&msk->pm.lock); 1587 release_sock(sk); 1588 1589 next: 1590 sock_put(sk); 1591 cond_resched(); 1592 } 1593 1594 return 0; 1595 } 1596 1597 int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) 1598 { 1599 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1600 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1601 struct mptcp_pm_addr_entry addr, *entry; 1602 unsigned int addr_max; 1603 int ret; 1604 1605 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 1606 if (ret < 0) 1607 return ret; 1608 1609 /* the zero id address is special: the first address used by the msk 1610 * always gets such an id, so different subflows can have different zero 1611 * id addresses. Additionally zero id is not accounted for in id_bitmap. 1612 * Let's use an 'mptcp_rm_list' instead of the common remove code. 1613 */ 1614 if (addr.addr.id == 0) 1615 return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr); 1616 1617 spin_lock_bh(&pernet->lock); 1618 entry = __lookup_addr_by_id(pernet, addr.addr.id); 1619 if (!entry) { 1620 GENL_SET_ERR_MSG(info, "address not found"); 1621 spin_unlock_bh(&pernet->lock); 1622 return -EINVAL; 1623 } 1624 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { 1625 addr_max = pernet->add_addr_signal_max; 1626 WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1); 1627 } 1628 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1629 addr_max = pernet->local_addr_max; 1630 WRITE_ONCE(pernet->local_addr_max, addr_max - 1); 1631 } 1632 1633 pernet->addrs--; 1634 list_del_rcu(&entry->list); 1635 __clear_bit(entry->addr.id, pernet->id_bitmap); 1636 spin_unlock_bh(&pernet->lock); 1637 1638 mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); 1639 synchronize_rcu(); 1640 __mptcp_pm_release_addr_entry(entry); 1641 1642 return ret; 1643 } 1644 1645 /* Called from the userspace PM only */ 1646 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) 1647 { 1648 struct mptcp_rm_list alist = { .nr = 0 }; 1649 struct mptcp_pm_addr_entry *entry; 1650 int anno_nr = 0; 1651 1652 list_for_each_entry(entry, rm_list, list) { 1653 if (alist.nr >= MPTCP_RM_IDS_MAX) 1654 break; 1655 1656 /* only delete if either announced or matching a subflow */ 1657 if (remove_anno_list_by_saddr(msk, &entry->addr)) 1658 anno_nr++; 1659 else if (!lookup_subflow_by_saddr(&msk->conn_list, 1660 &entry->addr)) 1661 continue; 1662 1663 alist.ids[alist.nr++] = entry->addr.id; 1664 } 1665 1666 if (alist.nr) { 1667 spin_lock_bh(&msk->pm.lock); 1668 msk->pm.add_addr_signaled -= anno_nr; 1669 mptcp_pm_remove_addr(msk, &alist); 1670 spin_unlock_bh(&msk->pm.lock); 1671 } 1672 } 1673 1674 /* Called from the in-kernel PM only */ 1675 static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, 1676 struct list_head *rm_list) 1677 { 1678 struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; 1679 struct mptcp_pm_addr_entry *entry; 1680 1681 list_for_each_entry(entry, rm_list, list) { 1682 if (slist.nr < MPTCP_RM_IDS_MAX && 1683 lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) 1684 slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1685 1686 if (alist.nr < MPTCP_RM_IDS_MAX && 1687 remove_anno_list_by_saddr(msk, &entry->addr)) 1688 alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1689 } 1690 1691 spin_lock_bh(&msk->pm.lock); 1692 if (alist.nr) { 1693 msk->pm.add_addr_signaled -= alist.nr; 1694 mptcp_pm_remove_addr(msk, &alist); 1695 } 1696 if (slist.nr) 1697 mptcp_pm_nl_rm_subflow_received(msk, &slist); 1698 /* Reset counters: maybe some subflows have been removed before */ 1699 bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1700 msk->pm.local_addr_used = 0; 1701 spin_unlock_bh(&msk->pm.lock); 1702 } 1703 1704 static void mptcp_nl_remove_addrs_list(struct net *net, 1705 struct list_head *rm_list) 1706 { 1707 long s_slot = 0, s_num = 0; 1708 struct mptcp_sock *msk; 1709 1710 if (list_empty(rm_list)) 1711 return; 1712 1713 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1714 struct sock *sk = (struct sock *)msk; 1715 1716 if (!mptcp_pm_is_userspace(msk)) { 1717 lock_sock(sk); 1718 mptcp_pm_remove_addrs_and_subflows(msk, rm_list); 1719 release_sock(sk); 1720 } 1721 1722 sock_put(sk); 1723 cond_resched(); 1724 } 1725 } 1726 1727 /* caller must ensure the RCU grace period is already elapsed */ 1728 static void __flush_addrs(struct list_head *list) 1729 { 1730 while (!list_empty(list)) { 1731 struct mptcp_pm_addr_entry *cur; 1732 1733 cur = list_entry(list->next, 1734 struct mptcp_pm_addr_entry, list); 1735 list_del_rcu(&cur->list); 1736 __mptcp_pm_release_addr_entry(cur); 1737 } 1738 } 1739 1740 static void __reset_counters(struct pm_nl_pernet *pernet) 1741 { 1742 WRITE_ONCE(pernet->add_addr_signal_max, 0); 1743 WRITE_ONCE(pernet->add_addr_accept_max, 0); 1744 WRITE_ONCE(pernet->local_addr_max, 0); 1745 pernet->addrs = 0; 1746 } 1747 1748 int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) 1749 { 1750 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1751 LIST_HEAD(free_list); 1752 1753 spin_lock_bh(&pernet->lock); 1754 list_splice_init(&pernet->local_addr_list, &free_list); 1755 __reset_counters(pernet); 1756 pernet->next_id = 1; 1757 bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1758 spin_unlock_bh(&pernet->lock); 1759 mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list); 1760 synchronize_rcu(); 1761 __flush_addrs(&free_list); 1762 return 0; 1763 } 1764 1765 int mptcp_nl_fill_addr(struct sk_buff *skb, 1766 struct mptcp_pm_addr_entry *entry) 1767 { 1768 struct mptcp_addr_info *addr = &entry->addr; 1769 struct nlattr *attr; 1770 1771 attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR); 1772 if (!attr) 1773 return -EMSGSIZE; 1774 1775 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family)) 1776 goto nla_put_failure; 1777 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port))) 1778 goto nla_put_failure; 1779 if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id)) 1780 goto nla_put_failure; 1781 if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags)) 1782 goto nla_put_failure; 1783 if (entry->ifindex && 1784 nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex)) 1785 goto nla_put_failure; 1786 1787 if (addr->family == AF_INET && 1788 nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4, 1789 addr->addr.s_addr)) 1790 goto nla_put_failure; 1791 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1792 else if (addr->family == AF_INET6 && 1793 nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6)) 1794 goto nla_put_failure; 1795 #endif 1796 nla_nest_end(skb, attr); 1797 return 0; 1798 1799 nla_put_failure: 1800 nla_nest_cancel(skb, attr); 1801 return -EMSGSIZE; 1802 } 1803 1804 int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info) 1805 { 1806 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1807 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1808 struct mptcp_pm_addr_entry addr, *entry; 1809 struct sk_buff *msg; 1810 void *reply; 1811 int ret; 1812 1813 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 1814 if (ret < 0) 1815 return ret; 1816 1817 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1818 if (!msg) 1819 return -ENOMEM; 1820 1821 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, 1822 info->genlhdr->cmd); 1823 if (!reply) { 1824 GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); 1825 ret = -EMSGSIZE; 1826 goto fail; 1827 } 1828 1829 spin_lock_bh(&pernet->lock); 1830 entry = __lookup_addr_by_id(pernet, addr.addr.id); 1831 if (!entry) { 1832 GENL_SET_ERR_MSG(info, "address not found"); 1833 ret = -EINVAL; 1834 goto unlock_fail; 1835 } 1836 1837 ret = mptcp_nl_fill_addr(msg, entry); 1838 if (ret) 1839 goto unlock_fail; 1840 1841 genlmsg_end(msg, reply); 1842 ret = genlmsg_reply(msg, info); 1843 spin_unlock_bh(&pernet->lock); 1844 return ret; 1845 1846 unlock_fail: 1847 spin_unlock_bh(&pernet->lock); 1848 1849 fail: 1850 nlmsg_free(msg); 1851 return ret; 1852 } 1853 1854 int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info) 1855 { 1856 return mptcp_pm_get_addr(skb, info); 1857 } 1858 1859 int mptcp_pm_nl_dump_addr(struct sk_buff *msg, 1860 struct netlink_callback *cb) 1861 { 1862 struct net *net = sock_net(msg->sk); 1863 struct mptcp_pm_addr_entry *entry; 1864 struct pm_nl_pernet *pernet; 1865 int id = cb->args[0]; 1866 void *hdr; 1867 int i; 1868 1869 pernet = pm_nl_get_pernet(net); 1870 1871 spin_lock_bh(&pernet->lock); 1872 for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) { 1873 if (test_bit(i, pernet->id_bitmap)) { 1874 entry = __lookup_addr_by_id(pernet, i); 1875 if (!entry) 1876 break; 1877 1878 if (entry->addr.id <= id) 1879 continue; 1880 1881 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, 1882 cb->nlh->nlmsg_seq, &mptcp_genl_family, 1883 NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); 1884 if (!hdr) 1885 break; 1886 1887 if (mptcp_nl_fill_addr(msg, entry) < 0) { 1888 genlmsg_cancel(msg, hdr); 1889 break; 1890 } 1891 1892 id = entry->addr.id; 1893 genlmsg_end(msg, hdr); 1894 } 1895 } 1896 spin_unlock_bh(&pernet->lock); 1897 1898 cb->args[0] = id; 1899 return msg->len; 1900 } 1901 1902 int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg, 1903 struct netlink_callback *cb) 1904 { 1905 return mptcp_pm_dump_addr(msg, cb); 1906 } 1907 1908 static int parse_limit(struct genl_info *info, int id, unsigned int *limit) 1909 { 1910 struct nlattr *attr = info->attrs[id]; 1911 1912 if (!attr) 1913 return 0; 1914 1915 *limit = nla_get_u32(attr); 1916 if (*limit > MPTCP_PM_ADDR_MAX) { 1917 GENL_SET_ERR_MSG(info, "limit greater than maximum"); 1918 return -EINVAL; 1919 } 1920 return 0; 1921 } 1922 1923 int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info) 1924 { 1925 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1926 unsigned int rcv_addrs, subflows; 1927 int ret; 1928 1929 spin_lock_bh(&pernet->lock); 1930 rcv_addrs = pernet->add_addr_accept_max; 1931 ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs); 1932 if (ret) 1933 goto unlock; 1934 1935 subflows = pernet->subflows_max; 1936 ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows); 1937 if (ret) 1938 goto unlock; 1939 1940 WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs); 1941 WRITE_ONCE(pernet->subflows_max, subflows); 1942 1943 unlock: 1944 spin_unlock_bh(&pernet->lock); 1945 return ret; 1946 } 1947 1948 int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info) 1949 { 1950 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1951 struct sk_buff *msg; 1952 void *reply; 1953 1954 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1955 if (!msg) 1956 return -ENOMEM; 1957 1958 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, 1959 MPTCP_PM_CMD_GET_LIMITS); 1960 if (!reply) 1961 goto fail; 1962 1963 if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS, 1964 READ_ONCE(pernet->add_addr_accept_max))) 1965 goto fail; 1966 1967 if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS, 1968 READ_ONCE(pernet->subflows_max))) 1969 goto fail; 1970 1971 genlmsg_end(msg, reply); 1972 return genlmsg_reply(msg, info); 1973 1974 fail: 1975 GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); 1976 nlmsg_free(msg); 1977 return -EMSGSIZE; 1978 } 1979 1980 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, 1981 struct mptcp_addr_info *addr) 1982 { 1983 struct mptcp_rm_list list = { .nr = 0 }; 1984 1985 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1986 1987 spin_lock_bh(&msk->pm.lock); 1988 mptcp_pm_nl_rm_subflow_received(msk, &list); 1989 __mark_subflow_endp_available(msk, list.ids[0]); 1990 mptcp_pm_create_subflow_or_signal_addr(msk); 1991 spin_unlock_bh(&msk->pm.lock); 1992 } 1993 1994 static int mptcp_nl_set_flags(struct net *net, 1995 struct mptcp_addr_info *addr, 1996 u8 bkup, u8 changed) 1997 { 1998 long s_slot = 0, s_num = 0; 1999 struct mptcp_sock *msk; 2000 int ret = -EINVAL; 2001 2002 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 2003 struct sock *sk = (struct sock *)msk; 2004 2005 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) 2006 goto next; 2007 2008 lock_sock(sk); 2009 if (changed & MPTCP_PM_ADDR_FLAG_BACKUP) 2010 ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup); 2011 if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) 2012 mptcp_pm_nl_fullmesh(msk, addr); 2013 release_sock(sk); 2014 2015 next: 2016 sock_put(sk); 2017 cond_resched(); 2018 } 2019 2020 return ret; 2021 } 2022 2023 int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info) 2024 { 2025 struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }; 2026 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; 2027 u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | 2028 MPTCP_PM_ADDR_FLAG_FULLMESH; 2029 struct net *net = sock_net(skb->sk); 2030 struct mptcp_pm_addr_entry *entry; 2031 struct pm_nl_pernet *pernet; 2032 u8 lookup_by_id = 0; 2033 u8 bkup = 0; 2034 int ret; 2035 2036 pernet = pm_nl_get_pernet(net); 2037 2038 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 2039 if (ret < 0) 2040 return ret; 2041 2042 if (addr.addr.family == AF_UNSPEC) { 2043 lookup_by_id = 1; 2044 if (!addr.addr.id) { 2045 GENL_SET_ERR_MSG(info, "missing required inputs"); 2046 return -EOPNOTSUPP; 2047 } 2048 } 2049 2050 if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) 2051 bkup = 1; 2052 2053 spin_lock_bh(&pernet->lock); 2054 entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) : 2055 __lookup_addr(pernet, &addr.addr); 2056 if (!entry) { 2057 spin_unlock_bh(&pernet->lock); 2058 GENL_SET_ERR_MSG(info, "address not found"); 2059 return -EINVAL; 2060 } 2061 if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && 2062 (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { 2063 spin_unlock_bh(&pernet->lock); 2064 GENL_SET_ERR_MSG(info, "invalid addr flags"); 2065 return -EINVAL; 2066 } 2067 2068 changed = (addr.flags ^ entry->flags) & mask; 2069 entry->flags = (entry->flags & ~mask) | (addr.flags & mask); 2070 addr = *entry; 2071 spin_unlock_bh(&pernet->lock); 2072 2073 mptcp_nl_set_flags(net, &addr.addr, bkup, changed); 2074 return 0; 2075 } 2076 2077 int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info) 2078 { 2079 return mptcp_pm_set_flags(skb, info); 2080 } 2081 2082 static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp) 2083 { 2084 genlmsg_multicast_netns(&mptcp_genl_family, net, 2085 nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp); 2086 } 2087 2088 bool mptcp_userspace_pm_active(const struct mptcp_sock *msk) 2089 { 2090 return genl_has_listeners(&mptcp_genl_family, 2091 sock_net((const struct sock *)msk), 2092 MPTCP_PM_EV_GRP_OFFSET); 2093 } 2094 2095 static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk) 2096 { 2097 const struct inet_sock *issk = inet_sk(ssk); 2098 const struct mptcp_subflow_context *sf; 2099 2100 if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family)) 2101 return -EMSGSIZE; 2102 2103 switch (ssk->sk_family) { 2104 case AF_INET: 2105 if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr)) 2106 return -EMSGSIZE; 2107 if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr)) 2108 return -EMSGSIZE; 2109 break; 2110 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2111 case AF_INET6: { 2112 const struct ipv6_pinfo *np = inet6_sk(ssk); 2113 2114 if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr)) 2115 return -EMSGSIZE; 2116 if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr)) 2117 return -EMSGSIZE; 2118 break; 2119 } 2120 #endif 2121 default: 2122 WARN_ON_ONCE(1); 2123 return -EMSGSIZE; 2124 } 2125 2126 if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport)) 2127 return -EMSGSIZE; 2128 if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport)) 2129 return -EMSGSIZE; 2130 2131 sf = mptcp_subflow_ctx(ssk); 2132 if (WARN_ON_ONCE(!sf)) 2133 return -EINVAL; 2134 2135 if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf))) 2136 return -EMSGSIZE; 2137 2138 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id)) 2139 return -EMSGSIZE; 2140 2141 return 0; 2142 } 2143 2144 static int mptcp_event_put_token_and_ssk(struct sk_buff *skb, 2145 const struct mptcp_sock *msk, 2146 const struct sock *ssk) 2147 { 2148 const struct sock *sk = (const struct sock *)msk; 2149 const struct mptcp_subflow_context *sf; 2150 u8 sk_err; 2151 2152 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) 2153 return -EMSGSIZE; 2154 2155 if (mptcp_event_add_subflow(skb, ssk)) 2156 return -EMSGSIZE; 2157 2158 sf = mptcp_subflow_ctx(ssk); 2159 if (WARN_ON_ONCE(!sf)) 2160 return -EINVAL; 2161 2162 if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup)) 2163 return -EMSGSIZE; 2164 2165 if (ssk->sk_bound_dev_if && 2166 nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if)) 2167 return -EMSGSIZE; 2168 2169 sk_err = READ_ONCE(ssk->sk_err); 2170 if (sk_err && sk->sk_state == TCP_ESTABLISHED && 2171 nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err)) 2172 return -EMSGSIZE; 2173 2174 return 0; 2175 } 2176 2177 static int mptcp_event_sub_established(struct sk_buff *skb, 2178 const struct mptcp_sock *msk, 2179 const struct sock *ssk) 2180 { 2181 return mptcp_event_put_token_and_ssk(skb, msk, ssk); 2182 } 2183 2184 static int mptcp_event_sub_closed(struct sk_buff *skb, 2185 const struct mptcp_sock *msk, 2186 const struct sock *ssk) 2187 { 2188 const struct mptcp_subflow_context *sf; 2189 2190 if (mptcp_event_put_token_and_ssk(skb, msk, ssk)) 2191 return -EMSGSIZE; 2192 2193 sf = mptcp_subflow_ctx(ssk); 2194 if (!sf->reset_seen) 2195 return 0; 2196 2197 if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason)) 2198 return -EMSGSIZE; 2199 2200 if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient)) 2201 return -EMSGSIZE; 2202 2203 return 0; 2204 } 2205 2206 static int mptcp_event_created(struct sk_buff *skb, 2207 const struct mptcp_sock *msk, 2208 const struct sock *ssk) 2209 { 2210 int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)); 2211 2212 if (err) 2213 return err; 2214 2215 if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side))) 2216 return -EMSGSIZE; 2217 2218 return mptcp_event_add_subflow(skb, ssk); 2219 } 2220 2221 void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id) 2222 { 2223 struct net *net = sock_net((const struct sock *)msk); 2224 struct nlmsghdr *nlh; 2225 struct sk_buff *skb; 2226 2227 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) 2228 return; 2229 2230 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 2231 if (!skb) 2232 return; 2233 2234 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED); 2235 if (!nlh) 2236 goto nla_put_failure; 2237 2238 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) 2239 goto nla_put_failure; 2240 2241 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id)) 2242 goto nla_put_failure; 2243 2244 genlmsg_end(skb, nlh); 2245 mptcp_nl_mcast_send(net, skb, GFP_ATOMIC); 2246 return; 2247 2248 nla_put_failure: 2249 nlmsg_free(skb); 2250 } 2251 2252 void mptcp_event_addr_announced(const struct sock *ssk, 2253 const struct mptcp_addr_info *info) 2254 { 2255 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 2256 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 2257 struct net *net = sock_net(ssk); 2258 struct nlmsghdr *nlh; 2259 struct sk_buff *skb; 2260 2261 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) 2262 return; 2263 2264 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 2265 if (!skb) 2266 return; 2267 2268 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, 2269 MPTCP_EVENT_ANNOUNCED); 2270 if (!nlh) 2271 goto nla_put_failure; 2272 2273 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) 2274 goto nla_put_failure; 2275 2276 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id)) 2277 goto nla_put_failure; 2278 2279 if (nla_put_be16(skb, MPTCP_ATTR_DPORT, 2280 info->port == 0 ? 2281 inet_sk(ssk)->inet_dport : 2282 info->port)) 2283 goto nla_put_failure; 2284 2285 switch (info->family) { 2286 case AF_INET: 2287 if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr)) 2288 goto nla_put_failure; 2289 break; 2290 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2291 case AF_INET6: 2292 if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6)) 2293 goto nla_put_failure; 2294 break; 2295 #endif 2296 default: 2297 WARN_ON_ONCE(1); 2298 goto nla_put_failure; 2299 } 2300 2301 genlmsg_end(skb, nlh); 2302 mptcp_nl_mcast_send(net, skb, GFP_ATOMIC); 2303 return; 2304 2305 nla_put_failure: 2306 nlmsg_free(skb); 2307 } 2308 2309 void mptcp_event_pm_listener(const struct sock *ssk, 2310 enum mptcp_event_type event) 2311 { 2312 const struct inet_sock *issk = inet_sk(ssk); 2313 struct net *net = sock_net(ssk); 2314 struct nlmsghdr *nlh; 2315 struct sk_buff *skb; 2316 2317 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) 2318 return; 2319 2320 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2321 if (!skb) 2322 return; 2323 2324 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event); 2325 if (!nlh) 2326 goto nla_put_failure; 2327 2328 if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family)) 2329 goto nla_put_failure; 2330 2331 if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport)) 2332 goto nla_put_failure; 2333 2334 switch (ssk->sk_family) { 2335 case AF_INET: 2336 if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr)) 2337 goto nla_put_failure; 2338 break; 2339 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2340 case AF_INET6: { 2341 const struct ipv6_pinfo *np = inet6_sk(ssk); 2342 2343 if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr)) 2344 goto nla_put_failure; 2345 break; 2346 } 2347 #endif 2348 default: 2349 WARN_ON_ONCE(1); 2350 goto nla_put_failure; 2351 } 2352 2353 genlmsg_end(skb, nlh); 2354 mptcp_nl_mcast_send(net, skb, GFP_KERNEL); 2355 return; 2356 2357 nla_put_failure: 2358 nlmsg_free(skb); 2359 } 2360 2361 void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, 2362 const struct sock *ssk, gfp_t gfp) 2363 { 2364 struct net *net = sock_net((const struct sock *)msk); 2365 struct nlmsghdr *nlh; 2366 struct sk_buff *skb; 2367 2368 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) 2369 return; 2370 2371 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 2372 if (!skb) 2373 return; 2374 2375 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type); 2376 if (!nlh) 2377 goto nla_put_failure; 2378 2379 switch (type) { 2380 case MPTCP_EVENT_UNSPEC: 2381 WARN_ON_ONCE(1); 2382 break; 2383 case MPTCP_EVENT_CREATED: 2384 case MPTCP_EVENT_ESTABLISHED: 2385 if (mptcp_event_created(skb, msk, ssk) < 0) 2386 goto nla_put_failure; 2387 break; 2388 case MPTCP_EVENT_CLOSED: 2389 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0) 2390 goto nla_put_failure; 2391 break; 2392 case MPTCP_EVENT_ANNOUNCED: 2393 case MPTCP_EVENT_REMOVED: 2394 /* call mptcp_event_addr_announced()/removed instead */ 2395 WARN_ON_ONCE(1); 2396 break; 2397 case MPTCP_EVENT_SUB_ESTABLISHED: 2398 case MPTCP_EVENT_SUB_PRIORITY: 2399 if (mptcp_event_sub_established(skb, msk, ssk) < 0) 2400 goto nla_put_failure; 2401 break; 2402 case MPTCP_EVENT_SUB_CLOSED: 2403 if (mptcp_event_sub_closed(skb, msk, ssk) < 0) 2404 goto nla_put_failure; 2405 break; 2406 case MPTCP_EVENT_LISTENER_CREATED: 2407 case MPTCP_EVENT_LISTENER_CLOSED: 2408 break; 2409 } 2410 2411 genlmsg_end(skb, nlh); 2412 mptcp_nl_mcast_send(net, skb, gfp); 2413 return; 2414 2415 nla_put_failure: 2416 nlmsg_free(skb); 2417 } 2418 2419 struct genl_family mptcp_genl_family __ro_after_init = { 2420 .name = MPTCP_PM_NAME, 2421 .version = MPTCP_PM_VER, 2422 .netnsok = true, 2423 .module = THIS_MODULE, 2424 .ops = mptcp_pm_nl_ops, 2425 .n_ops = ARRAY_SIZE(mptcp_pm_nl_ops), 2426 .resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1, 2427 .mcgrps = mptcp_pm_mcgrps, 2428 .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps), 2429 }; 2430 2431 static int __net_init pm_nl_init_net(struct net *net) 2432 { 2433 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); 2434 2435 INIT_LIST_HEAD_RCU(&pernet->local_addr_list); 2436 2437 /* Cit. 2 subflows ought to be enough for anybody. */ 2438 pernet->subflows_max = 2; 2439 pernet->next_id = 1; 2440 pernet->stale_loss_cnt = 4; 2441 spin_lock_init(&pernet->lock); 2442 2443 /* No need to initialize other pernet fields, the struct is zeroed at 2444 * allocation time. 2445 */ 2446 2447 return 0; 2448 } 2449 2450 static void __net_exit pm_nl_exit_net(struct list_head *net_list) 2451 { 2452 struct net *net; 2453 2454 list_for_each_entry(net, net_list, exit_list) { 2455 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); 2456 2457 /* net is removed from namespace list, can't race with 2458 * other modifiers, also netns core already waited for a 2459 * RCU grace period. 2460 */ 2461 __flush_addrs(&pernet->local_addr_list); 2462 } 2463 } 2464 2465 static struct pernet_operations mptcp_pm_pernet_ops = { 2466 .init = pm_nl_init_net, 2467 .exit_batch = pm_nl_exit_net, 2468 .id = &pm_nl_pernet_id, 2469 .size = sizeof(struct pm_nl_pernet), 2470 }; 2471 2472 void __init mptcp_pm_nl_init(void) 2473 { 2474 if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0) 2475 panic("Failed to register MPTCP PM pernet subsystem.\n"); 2476 2477 if (genl_register_family(&mptcp_genl_family)) 2478 panic("Failed to register MPTCP PM netlink family\n"); 2479 } 2480