// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2020, Red Hat, Inc.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/inet.h>
#include <linux/kernel.h>
#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>

#include "protocol.h"
#include "mib.h"
#include "mptcp_pm_gen.h"

static int pm_nl_pernet_id;

struct mptcp_pm_add_entry {
	struct list_head list;
	struct mptcp_addr_info addr;
	u8 retrans_times;
	struct timer_list add_timer;
	struct mptcp_sock *sock;
};

struct pm_nl_pernet {
	/* protects pernet updates */
	spinlock_t lock;
	struct list_head local_addr_list;
	unsigned int addrs;
	unsigned int stale_loss_cnt;
	unsigned int add_addr_signal_max;
	unsigned int add_addr_accept_max;
	unsigned int local_addr_max;
	unsigned int subflows_max;
	unsigned int next_id;
	DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
};

#define MPTCP_PM_ADDR_MAX	8
#define ADD_ADDR_RETRANS_MAX	3

static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
{
	return net_generic(net, pm_nl_pernet_id);
}

static struct pm_nl_pernet *
pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
{
	return pm_nl_get_pernet(sock_net((struct sock *)msk));
}

bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
			   const struct mptcp_addr_info *b, bool use_port)
{
	bool addr_equals = false;

	if (a->family == b->family) {
		if (a->family == AF_INET)
			addr_equals = a->addr.s_addr == b->addr.s_addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else
			addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6);
	} else if (a->family == AF_INET) {
		if (ipv6_addr_v4mapped(&b->addr6))
			addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
	} else if (b->family == AF_INET) {
		if (ipv6_addr_v4mapped(&a->addr6))
			addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
#endif
	}

	if (!addr_equals)
		return false;
	if (!use_port)
		return true;

	return a->port == b->port;
}

void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;
	addr->port = htons(skc->skc_num);
	if (addr->family == AF_INET)
		addr->addr.s_addr = skc->skc_rcv_saddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6)
		addr->addr6 = skc->skc_v6_rcv_saddr;
#endif
}

static void remote_address(const struct sock_common *skc,
			   struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;
	addr->port = skc->skc_dport;
	if (addr->family == AF_INET)
		addr->addr.s_addr = skc->skc_daddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6)
		addr->addr6 = skc->skc_v6_daddr;
#endif
}

static bool lookup_subflow_by_saddr(const struct list_head *list,
				    const struct mptcp_addr_info *saddr)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_addr_info cur;
	struct sock_common *skc;

	list_for_each_entry(subflow, list, node) {
		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);

		mptcp_local_address(skc, &cur);
		if (mptcp_addresses_equal(&cur, saddr, saddr->port))
			return true;
	}

	return false;
}

static bool lookup_subflow_by_daddr(const struct list_head *list,
				    const struct mptcp_addr_info *daddr)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_addr_info cur;

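	/* walk every subflow and compare its remote address with @daddr;
	 * only connected or connecting subflows are considered
	 */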
	list_for_each_entry(subflow, list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!((1 << inet_sk_state_load(ssk)) &
		      (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
			continue;

		remote_address((struct sock_common *)ssk, &cur);
		if (mptcp_addresses_equal(&cur, daddr, daddr->port))
			return true;
	}

	return false;
}

static bool
select_local_address(const struct pm_nl_pernet *pernet,
		     const struct mptcp_sock *msk,
		     struct mptcp_pm_addr_entry *new_entry)
{
	struct mptcp_pm_addr_entry *entry;
	bool found = false;

	msk_owned_by_me(msk);

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
			continue;

		if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
			continue;

		*new_entry = *entry;
		found = true;
		break;
	}
	rcu_read_unlock();

	return found;
}

static bool
select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
		      struct mptcp_pm_addr_entry *new_entry)
{
	struct mptcp_pm_addr_entry *entry;
	bool found = false;

	rcu_read_lock();
	/* do not keep any additional per socket state, just signal
	 * the address list in order.
	 * Note: removal from the local address list during the msk life-cycle
	 * can lead to additional addresses not being announced.
	 */
	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
		if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
			continue;

		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
			continue;

		*new_entry = *entry;
		found = true;
		break;
	}
	rcu_read_unlock();

	return found;
}

unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
{
	const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->add_addr_signal_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);

unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->add_addr_accept_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);

unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->subflows_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);

unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->local_addr_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);

bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
	    (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
			       MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
		WRITE_ONCE(msk->pm.work_pending, false);
		return false;
	}
	return true;
}

struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *entry;

	lockdep_assert_held(&msk->pm.lock);

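	/* the announce list is only touched under the PM lock (asserted
	 * above), a plain list walk is enough; ports are always compared
	 */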
	list_for_each_entry(entry, &msk->pm.anno_list, list) {
		if (mptcp_addresses_equal(&entry->addr, addr, true))
			return entry;
	}

	return NULL;
}

bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
{
	struct mptcp_pm_add_entry *entry;
	struct mptcp_addr_info saddr;
	bool ret = false;

	mptcp_local_address((struct sock_common *)sk, &saddr);

	spin_lock_bh(&msk->pm.lock);
	list_for_each_entry(entry, &msk->pm.anno_list, list) {
		if (mptcp_addresses_equal(&entry->addr, &saddr, true)) {
			ret = true;
			goto out;
		}
	}

out:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

static void mptcp_pm_add_timer(struct timer_list *timer)
{
	struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
	struct mptcp_sock *msk = entry->sock;
	struct sock *sk = (struct sock *)msk;

	pr_debug("msk=%p\n", msk);

	if (!msk)
		return;

	if (inet_sk_state_load(sk) == TCP_CLOSE)
		return;

	if (!entry->addr.id)
		return;

	if (mptcp_pm_should_add_signal_addr(msk)) {
		sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
		goto out;
	}

	spin_lock_bh(&msk->pm.lock);

	if (!mptcp_pm_should_add_signal_addr(msk)) {
		pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
		mptcp_pm_announce_addr(msk, &entry->addr, false);
		mptcp_pm_add_addr_send_ack(msk);
		entry->retrans_times++;
	}

	if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
		sk_reset_timer(sk, timer,
			       jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));

	spin_unlock_bh(&msk->pm.lock);

	if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
		mptcp_pm_subflow_established(msk);

out:
	__sock_put(sk);
}

struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
		       const struct mptcp_addr_info *addr, bool check_id)
{
	struct mptcp_pm_add_entry *entry;
	struct sock *sk = (struct sock *)msk;

	spin_lock_bh(&msk->pm.lock);
	entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
	if (entry && (!check_id || entry->addr.id == addr->id))
		entry->retrans_times = ADD_ADDR_RETRANS_MAX;
	spin_unlock_bh(&msk->pm.lock);

	if (entry && (!check_id || entry->addr.id == addr->id))
		sk_stop_timer_sync(sk, &entry->add_timer);

	return entry;
}

bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *add_entry = NULL;
	struct sock *sk = (struct sock *)msk;
	struct net *net = sock_net(sk);

	lockdep_assert_held(&msk->pm.lock);

	add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);

	if (add_entry) {
		if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
			return false;

		sk_reset_timer(sk, &add_entry->add_timer,
			       jiffies + mptcp_get_add_addr_timeout(net));
		return true;
	}

	add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
	if (!add_entry)
		return false;

	list_add(&add_entry->list, &msk->pm.anno_list);

	add_entry->addr = *addr;
	add_entry->sock = msk;
	add_entry->retrans_times = 0;

	timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
	sk_reset_timer(sk, &add_entry->add_timer,
		       jiffies + mptcp_get_add_addr_timeout(net));

	return true;
}

void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
{
	struct mptcp_pm_add_entry *entry, *tmp;
	struct sock *sk = (struct sock *)msk;
	LIST_HEAD(free_list);

	pr_debug("msk=%p\n", msk);

	spin_lock_bh(&msk->pm.lock);
	list_splice_init(&msk->pm.anno_list, &free_list);
	spin_unlock_bh(&msk->pm.lock);

	list_for_each_entry_safe(entry, tmp, &free_list, list) {
		sk_stop_timer_sync(sk, &entry->add_timer);
		kfree(entry);
	}
}

/* Fill all the remote addresses into the array addrs[],
 * and return the array size.
 */
static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
					      struct mptcp_addr_info *local,
					      bool fullmesh,
					      struct mptcp_addr_info *addrs)
{
	bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
	struct sock *sk = (struct sock *)msk, *ssk;
	struct mptcp_subflow_context *subflow;
	struct mptcp_addr_info remote = { 0 };
	unsigned int subflows_max;
	int i = 0;

	subflows_max = mptcp_pm_get_subflows_max(msk);
	remote_address((struct sock_common *)sk, &remote);

	/* Non-fullmesh endpoint, fill in the single entry
	 * corresponding to the primary MPC subflow remote address
	 */
	if (!fullmesh) {
		if (deny_id0)
			return 0;

		if (!mptcp_pm_addr_families_match(sk, local, &remote))
			return 0;

		msk->pm.subflows++;
		addrs[i++] = remote;
	} else {
		DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);

		/* Forbid creation of new subflows matching existing
		 * ones, possibly already created by incoming ADD_ADDR
		 */
		bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
		mptcp_for_each_subflow(msk, subflow)
			if (READ_ONCE(subflow->local_id) == local->id)
				__set_bit(subflow->remote_id, unavail_id);

		mptcp_for_each_subflow(msk, subflow) {
			ssk = mptcp_subflow_tcp_sock(subflow);
			remote_address((struct sock_common *)ssk, &addrs[i]);
			addrs[i].id = READ_ONCE(subflow->remote_id);
			if (deny_id0 && !addrs[i].id)
				continue;

			if (test_bit(addrs[i].id, unavail_id))
				continue;

			if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
				continue;

			if (msk->pm.subflows < subflows_max) {
				/* forbid creating multiple subflows towards
				 * this id
				 */
				__set_bit(addrs[i].id, unavail_id);
				msk->pm.subflows++;
				i++;
			}
		}
	}

	return i;
}

static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
				bool prio, bool backup)
{
	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
	bool slow;

	pr_debug("send ack for %s\n",
		 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ?
"add_addr" : "rm_addr")); 481 482 slow = lock_sock_fast(ssk); 483 if (prio) { 484 subflow->send_mp_prio = 1; 485 subflow->request_bkup = backup; 486 } 487 488 __mptcp_subflow_send_ack(ssk); 489 unlock_sock_fast(ssk, slow); 490 } 491 492 static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, 493 bool prio, bool backup) 494 { 495 spin_unlock_bh(&msk->pm.lock); 496 __mptcp_pm_send_ack(msk, subflow, prio, backup); 497 spin_lock_bh(&msk->pm.lock); 498 } 499 500 static struct mptcp_pm_addr_entry * 501 __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) 502 { 503 struct mptcp_pm_addr_entry *entry; 504 505 list_for_each_entry(entry, &pernet->local_addr_list, list) { 506 if (entry->addr.id == id) 507 return entry; 508 } 509 return NULL; 510 } 511 512 static struct mptcp_pm_addr_entry * 513 __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) 514 { 515 struct mptcp_pm_addr_entry *entry; 516 517 list_for_each_entry(entry, &pernet->local_addr_list, list) { 518 if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) 519 return entry; 520 } 521 return NULL; 522 } 523 524 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) 525 { 526 struct sock *sk = (struct sock *)msk; 527 struct mptcp_pm_addr_entry local; 528 unsigned int add_addr_signal_max; 529 bool signal_and_subflow = false; 530 unsigned int local_addr_max; 531 struct pm_nl_pernet *pernet; 532 unsigned int subflows_max; 533 534 pernet = pm_nl_get_pernet(sock_net(sk)); 535 536 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk); 537 local_addr_max = mptcp_pm_get_local_addr_max(msk); 538 subflows_max = mptcp_pm_get_subflows_max(msk); 539 540 /* do lazy endpoint usage accounting for the MPC subflows */ 541 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) { 542 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first); 543 struct mptcp_pm_addr_entry *entry; 544 struct mptcp_addr_info mpc_addr; 545 bool backup = false; 546 547 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); 548 rcu_read_lock(); 549 entry = __lookup_addr(pernet, &mpc_addr); 550 if (entry) { 551 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); 552 msk->mpc_endpoint_id = entry->addr.id; 553 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 554 } 555 rcu_read_unlock(); 556 557 if (backup) 558 mptcp_pm_send_ack(msk, subflow, true, backup); 559 560 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); 561 } 562 563 pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", 564 msk->pm.local_addr_used, local_addr_max, 565 msk->pm.add_addr_signaled, add_addr_signal_max, 566 msk->pm.subflows, subflows_max); 567 568 /* check first for announce */ 569 if (msk->pm.add_addr_signaled < add_addr_signal_max) { 570 /* due to racing events on both ends we can reach here while 571 * previous add address is still running: if we invoke now 572 * mptcp_pm_announce_addr(), that will fail and the 573 * corresponding id will be marked as used. 574 * Instead let the PM machinery reschedule us when the 575 * current address announce will be completed. 576 */ 577 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) 578 return; 579 580 if (!select_signal_address(pernet, msk, &local)) 581 goto subflow; 582 583 /* If the alloc fails, we are on memory pressure, not worth 584 * continuing, and trying to create subflows. 
		 */
		if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
			return;

		__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
		msk->pm.add_addr_signaled++;

		/* Special case for ID0: set the correct ID */
		if (local.addr.id == msk->mpc_endpoint_id)
			local.addr.id = 0;

		mptcp_pm_announce_addr(msk, &local.addr, false);
		mptcp_pm_nl_addr_send_ack(msk);

		if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
			signal_and_subflow = true;
	}

subflow:
	/* check if we should create a new subflow */
	while (msk->pm.local_addr_used < local_addr_max &&
	       msk->pm.subflows < subflows_max) {
		struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
		bool fullmesh;
		int i, nr;

		if (signal_and_subflow)
			signal_and_subflow = false;
		else if (!select_local_address(pernet, msk, &local))
			break;

		fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);

		__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);

		/* Special case for ID0: set the correct ID */
		if (local.addr.id == msk->mpc_endpoint_id)
			local.addr.id = 0;
		else /* local_addr_used is not decr for ID 0 */
			msk->pm.local_addr_used++;

		nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
		if (nr == 0)
			continue;

		spin_unlock_bh(&msk->pm.lock);
		for (i = 0; i < nr; i++)
			__mptcp_subflow_connect(sk, &local.addr, &addrs[i]);
		spin_lock_bh(&msk->pm.lock);
	}
	mptcp_pm_nl_check_work_pending(msk);
}

static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
{
	mptcp_pm_create_subflow_or_signal_addr(msk);
}

static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
{
	mptcp_pm_create_subflow_or_signal_addr(msk);
}

/* Fill all the local addresses into the array addrs[],
 * and return the array size.
 */
static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
					     struct mptcp_addr_info *remote,
					     struct mptcp_addr_info *addrs)
{
	struct sock *sk = (struct sock *)msk;
	struct mptcp_pm_addr_entry *entry;
	struct mptcp_addr_info mpc_addr;
	struct pm_nl_pernet *pernet;
	unsigned int subflows_max;
	int i = 0;

	pernet = pm_nl_get_pernet_from_msk(msk);
	subflows_max = mptcp_pm_get_subflows_max(msk);

	mptcp_local_address((struct sock_common *)msk, &mpc_addr);

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
			continue;

		if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
			continue;

		if (msk->pm.subflows < subflows_max) {
			msk->pm.subflows++;
			addrs[i] = entry->addr;

			/* Special case for ID0: set the correct ID */
			if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port))
				addrs[i].id = 0;

			i++;
		}
	}
	rcu_read_unlock();

	/* If the array is empty, fill in the single
	 * 'INADDR_ANY' local address
	 */
	if (!i) {
		struct mptcp_addr_info local;

		memset(&local, 0, sizeof(local));
		local.family =
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			       remote->family == AF_INET6 &&
			       ipv6_addr_v4mapped(&remote->addr6) ?
			       AF_INET :
#endif
			       remote->family;

		if (!mptcp_pm_addr_families_match(sk, &local, remote))
			return 0;

		msk->pm.subflows++;
		addrs[i++] = local;
	}

	return i;
}

static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
{
	struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
	struct sock *sk = (struct sock *)msk;
	unsigned int add_addr_accept_max;
	struct mptcp_addr_info remote;
	unsigned int subflows_max;
	bool sf_created = false;
	int i, nr;

	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("accepted %d:%d remote family %d\n",
		 msk->pm.add_addr_accepted, add_addr_accept_max,
		 msk->pm.remote.family);

	remote = msk->pm.remote;
	mptcp_pm_announce_addr(msk, &remote, true);
	mptcp_pm_nl_addr_send_ack(msk);

	if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
		return;

	/* pick id 0 port: if none is provided, use the remote address one */
	if (!remote.port)
		remote.port = sk->sk_dport;

	/* connect to the specified remote address, using whatever
	 * local address the routing configuration will pick.
	 */
	nr = fill_local_addresses_vec(msk, &remote, addrs);
	if (nr == 0)
		return;

	spin_unlock_bh(&msk->pm.lock);
	for (i = 0; i < nr; i++)
		if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
			sf_created = true;
	spin_lock_bh(&msk->pm.lock);

	if (sf_created) {
		/* add_addr_accepted is not decr for ID 0 */
		if (remote.id)
			msk->pm.add_addr_accepted++;
		if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
		    msk->pm.subflows >= subflows_max)
			WRITE_ONCE(msk->pm.accept_addr, false);
	}
}

void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	msk_owned_by_me(msk);
	lockdep_assert_held(&msk->pm.lock);

	if (!mptcp_pm_should_add_signal(msk) &&
	    !mptcp_pm_should_rm_signal(msk))
		return;

	mptcp_for_each_subflow(msk, subflow) {
		if (__mptcp_subflow_active(subflow)) {
			mptcp_pm_send_ack(msk, subflow, false, false);
			break;
		}
	}
}

int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
				 struct mptcp_addr_info *addr,
				 struct mptcp_addr_info *rem,
				 u8 bkup)
{
	struct mptcp_subflow_context *subflow;

	pr_debug("bkup=%d\n", bkup);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct mptcp_addr_info local, remote;

		mptcp_local_address((struct sock_common *)ssk, &local);
		if (!mptcp_addresses_equal(&local, addr, addr->port))
			continue;

		if (rem && rem->family != AF_UNSPEC) {
			remote_address((struct sock_common *)ssk, &remote);
			if (!mptcp_addresses_equal(&remote, rem, rem->port))
				continue;
		}

		__mptcp_pm_send_ack(msk, subflow, true, bkup);
		return 0;
	}

	return -EINVAL;
}

static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
					   const struct mptcp_rm_list *rm_list,
					   enum linux_mptcp_mib_field rm_type)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;
	u8 i;

	pr_debug("%s rm_list_nr %d\n",
		 rm_type == MPTCP_MIB_RMADDR ?
"address" : "subflow", rm_list->nr); 822 823 msk_owned_by_me(msk); 824 825 if (sk->sk_state == TCP_LISTEN) 826 return; 827 828 if (!rm_list->nr) 829 return; 830 831 if (list_empty(&msk->conn_list)) 832 return; 833 834 for (i = 0; i < rm_list->nr; i++) { 835 u8 rm_id = rm_list->ids[i]; 836 bool removed = false; 837 838 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 839 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 840 u8 remote_id = READ_ONCE(subflow->remote_id); 841 int how = RCV_SHUTDOWN | SEND_SHUTDOWN; 842 u8 id = subflow_get_local_id(subflow); 843 844 if (inet_sk_state_load(ssk) == TCP_CLOSE) 845 continue; 846 if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) 847 continue; 848 if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) 849 continue; 850 851 pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", 852 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", 853 i, rm_id, id, remote_id, msk->mpc_endpoint_id); 854 spin_unlock_bh(&msk->pm.lock); 855 mptcp_subflow_shutdown(sk, ssk, how); 856 857 /* the following takes care of updating the subflows counter */ 858 mptcp_close_ssk(sk, ssk, subflow); 859 spin_lock_bh(&msk->pm.lock); 860 861 removed |= subflow->request_join; 862 if (rm_type == MPTCP_MIB_RMSUBFLOW) 863 __MPTCP_INC_STATS(sock_net(sk), rm_type); 864 } 865 866 if (rm_type == MPTCP_MIB_RMADDR) 867 __MPTCP_INC_STATS(sock_net(sk), rm_type); 868 869 if (!removed) 870 continue; 871 872 if (!mptcp_pm_is_kernel(msk)) 873 continue; 874 875 if (rm_type == MPTCP_MIB_RMADDR && rm_id && 876 !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { 877 /* Note: if the subflow has been closed before, this 878 * add_addr_accepted counter will not be decremented. 879 */ 880 if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) 881 WRITE_ONCE(msk->pm.accept_addr, true); 882 } 883 } 884 } 885 886 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) 887 { 888 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); 889 } 890 891 static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, 892 const struct mptcp_rm_list *rm_list) 893 { 894 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); 895 } 896 897 void mptcp_pm_nl_work(struct mptcp_sock *msk) 898 { 899 struct mptcp_pm_data *pm = &msk->pm; 900 901 msk_owned_by_me(msk); 902 903 if (!(pm->status & MPTCP_PM_WORK_MASK)) 904 return; 905 906 spin_lock_bh(&msk->pm.lock); 907 908 pr_debug("msk=%p status=%x\n", msk, pm->status); 909 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { 910 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); 911 mptcp_pm_nl_add_addr_received(msk); 912 } 913 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { 914 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); 915 mptcp_pm_nl_addr_send_ack(msk); 916 } 917 if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { 918 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); 919 mptcp_pm_nl_rm_addr_received(msk); 920 } 921 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { 922 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); 923 mptcp_pm_nl_fully_established(msk); 924 } 925 if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { 926 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); 927 mptcp_pm_nl_subflow_established(msk); 928 } 929 930 spin_unlock_bh(&msk->pm.lock); 931 } 932 933 static bool address_use_port(struct mptcp_pm_addr_entry *entry) 934 { 935 return (entry->flags & 936 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == 937 MPTCP_PM_ADDR_FLAG_SIGNAL; 938 } 939 940 /* caller must ensure the RCU 
 * grace period is already elapsed */
static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
{
	if (entry->lsk)
		sock_release(entry->lsk);
	kfree(entry);
}

static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
					     struct mptcp_pm_addr_entry *entry,
					     bool needs_id)
{
	struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
	unsigned int addr_max;
	int ret = -EINVAL;

	spin_lock_bh(&pernet->lock);
	/* to keep the code simple, don't do IDR-like allocation for address ID,
	 * just bail when we exceed limits
	 */
	if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
		pernet->next_id = 1;
	if (pernet->addrs >= MPTCP_PM_ADDR_MAX) {
		ret = -ERANGE;
		goto out;
	}
	if (test_bit(entry->addr.id, pernet->id_bitmap)) {
		ret = -EBUSY;
		goto out;
	}

	/* do not insert duplicate address, differentiate on port only
	 * signal addresses
	 */
	if (!address_use_port(entry))
		entry->addr.port = 0;
	list_for_each_entry(cur, &pernet->local_addr_list, list) {
		if (mptcp_addresses_equal(&cur->addr, &entry->addr,
					  cur->addr.port || entry->addr.port)) {
			/* allow replacing the existing endpoint only if such
			 * endpoint is an implicit one and the user-space
			 * did not provide an endpoint id
			 */
			if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) {
				ret = -EEXIST;
				goto out;
			}
			if (entry->addr.id)
				goto out;

			pernet->addrs--;
			entry->addr.id = cur->addr.id;
			list_del_rcu(&cur->list);
			del_entry = cur;
			break;
		}
	}

	if (!entry->addr.id && needs_id) {
find_next:
		entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
						    MPTCP_PM_MAX_ADDR_ID + 1,
						    pernet->next_id);
		if (!entry->addr.id && pernet->next_id != 1) {
			pernet->next_id = 1;
			goto find_next;
		}
	}

	if (!entry->addr.id && needs_id)
		goto out;

	__set_bit(entry->addr.id, pernet->id_bitmap);
	if (entry->addr.id > pernet->next_id)
		pernet->next_id = entry->addr.id;

	if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
		addr_max = pernet->add_addr_signal_max;
		WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
		addr_max = pernet->local_addr_max;
		WRITE_ONCE(pernet->local_addr_max, addr_max + 1);
	}

	pernet->addrs++;
	if (!entry->addr.port)
		list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
	else
		list_add_rcu(&entry->list, &pernet->local_addr_list);
	ret = entry->addr.id;

out:
	spin_unlock_bh(&pernet->lock);

	/* just replaced an existing entry, free it */
	if (del_entry) {
		synchronize_rcu();
		__mptcp_pm_release_addr_entry(del_entry);
	}
	return ret;
}

static struct lock_class_key mptcp_slock_keys[2];
static struct lock_class_key mptcp_keys[2];

static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
					    struct mptcp_pm_addr_entry *entry)
{
	bool is_ipv6 = sk->sk_family == AF_INET6;
	int addrlen = sizeof(struct sockaddr_in);
	struct sockaddr_storage addr;
	struct sock *newsk, *ssk;
	int backlog = 1024;
	int err;

	err = sock_create_kern(sock_net(sk), entry->addr.family,
			       SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk);
	if (err)
		return err;

	newsk = entry->lsk->sk;
	if (!newsk)
		return -EINVAL;

	/* The subflow socket lock is acquired in a
	 * nested way with respect to the msk one
	 * in several places, even by the TCP stack, and this msk is a kernel
	 * socket: lockdep complains. Instead of propagating the _nested
	 * modifiers in several places, re-init the lock class for the msk
	 * socket to an mptcp specific one.
	 */
	sock_lock_init_class_and_name(newsk,
				      is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
				      &mptcp_slock_keys[is_ipv6],
				      is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
				      &mptcp_keys[is_ipv6]);

	lock_sock(newsk);
	ssk = __mptcp_nmpc_sk(mptcp_sk(newsk));
	release_sock(newsk);
	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (entry->addr.family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	if (ssk->sk_family == AF_INET)
		err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ssk->sk_family == AF_INET6)
		err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
#endif
	if (err)
		return err;

	/* We don't use mptcp_set_state() here because it needs to be called
	 * under the msk socket lock. For the moment, that will not bring
	 * anything more than only calling inet_sk_state_store(), because the
	 * old status is known (TCP_CLOSE).
	 */
	inet_sk_state_store(newsk, TCP_LISTEN);
	lock_sock(ssk);
	err = __inet_listen_sk(ssk, backlog);
	if (!err)
		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
	release_sock(ssk);
	return err;
}

int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
{
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	int ret = -1;

	pernet = pm_nl_get_pernet_from_msk(msk);

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
		if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
			ret = entry->addr.id;
			break;
		}
	}
	rcu_read_unlock();
	if (ret >= 0)
		return ret;

	/* address not found, add to local list */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->addr = *skc;
	entry->addr.id = 0;
	entry->addr.port = 0;
	entry->ifindex = 0;
	entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
	entry->lsk = NULL;
	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
	if (ret < 0)
		kfree(entry);

	return ret;
}

bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
	struct mptcp_pm_addr_entry *entry;
	bool backup = false;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
		if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
			break;
		}
	}
	rcu_read_unlock();

	return backup;
}

#define MPTCP_PM_CMD_GRP_OFFSET	0
#define MPTCP_PM_EV_GRP_OFFSET	1

static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
	[MPTCP_PM_CMD_GRP_OFFSET]	= { .name = MPTCP_PM_CMD_GRP_NAME, },
	[MPTCP_PM_EV_GRP_OFFSET]	= { .name = MPTCP_PM_EV_GRP_NAME,
					    .flags = GENL_MCAST_CAP_NET_ADMIN,
					  },
};

void
mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int active_max_loss_cnt;
	struct net *net = sock_net(sk);
	unsigned int stale_loss_cnt;
	bool slow;

	stale_loss_cnt = mptcp_stale_loss_cnt(net);
	if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
		return;

	/* look for another available subflow not in loss state */
	active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
	mptcp_for_each_subflow(msk, iter) {
		if (iter != subflow && mptcp_subflow_active(iter) &&
		    iter->stale_count < active_max_loss_cnt) {
			/* we have some alternatives, try to mark this subflow as idle ...*/
			slow = lock_sock_fast(ssk);
			if (!tcp_rtx_and_write_queues_empty(ssk)) {
				subflow->stale = 1;
				__mptcp_retransmit_pending_data(sk);
				MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
			}
			unlock_sock_fast(ssk, slow);

			/* always try to push the pending data regardless of re-injections:
			 * we can possibly use backup subflows now, and subflow selection
			 * is cheap under the msk socket lock
			 */
			__mptcp_push_pending(sk, 0);
			return;
		}
	}
}

static int mptcp_pm_family_to_addr(int family)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (family == AF_INET6)
		return MPTCP_PM_ADDR_ATTR_ADDR6;
#endif
	return MPTCP_PM_ADDR_ATTR_ADDR4;
}

static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[],
				       const struct nlattr *attr,
				       struct genl_info *info,
				       struct mptcp_addr_info *addr,
				       bool require_family)
{
	int err, addr_addr;

	if (!attr) {
		GENL_SET_ERR_MSG(info, "missing address info");
		return -EINVAL;
	}

	/* no validation needed - was already done via nested policy */
	err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
					  mptcp_pm_address_nl_policy, info->extack);
	if (err)
		return err;

	if (tb[MPTCP_PM_ADDR_ATTR_ID])
		addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);

	if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
		if (!require_family)
			return 0;

		NL_SET_ERR_MSG_ATTR(info->extack, attr,
				    "missing family");
		return -EINVAL;
	}

	addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
	if (addr->family != AF_INET
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	    && addr->family != AF_INET6
#endif
	    ) {
		NL_SET_ERR_MSG_ATTR(info->extack, attr,
				    "unknown address family");
		return -EINVAL;
	}
	addr_addr = mptcp_pm_family_to_addr(addr->family);
	if (!tb[addr_addr]) {
		NL_SET_ERR_MSG_ATTR(info->extack, attr,
				    "missing address data");
		return -EINVAL;
	}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr->family == AF_INET6)
		addr->addr6 = nla_get_in6_addr(tb[addr_addr]);
	else
#endif
		addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]);

	if (tb[MPTCP_PM_ADDR_ATTR_PORT])
		addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));

	return 0;
}

int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
			struct mptcp_addr_info *addr)
{
	struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];

	memset(addr, 0, sizeof(*addr));

	return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true);
}

int
mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
		     bool require_family,
		     struct mptcp_pm_addr_entry *entry)
{
	struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
	int err;

	memset(entry, 0, sizeof(*entry));

	err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family);
	if (err)
		return err;

	if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
		u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);

		entry->ifindex = val;
	}

	if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
		entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);

	if (tb[MPTCP_PM_ADDR_ATTR_PORT])
		entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));

	return 0;
}

static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
{
	return pm_nl_get_pernet(genl_info_net(info));
}

static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
					       struct mptcp_addr_info *addr)
{
	struct mptcp_sock *msk;
	long s_slot = 0, s_num = 0;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;
		struct mptcp_addr_info mpc_addr;

		if (!READ_ONCE(msk->fully_established) ||
		    mptcp_pm_is_userspace(msk))
			goto next;

		/* if the endp linked to the init sf is re-added with a != ID */
		mptcp_local_address((struct sock_common *)msk, &mpc_addr);

		lock_sock(sk);
		spin_lock_bh(&msk->pm.lock);
		if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
			msk->mpc_endpoint_id = addr->id;
		mptcp_pm_create_subflow_or_signal_addr(msk);
		spin_unlock_bh(&msk->pm.lock);
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}

static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
				      struct genl_info *info)
{
	struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];

	if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
					 mptcp_pm_address_nl_policy, info->extack) &&
	    tb[MPTCP_PM_ADDR_ATTR_ID])
		return true;
	return false;
}

int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct mptcp_pm_addr_entry addr, *entry;
	int ret;

	ret = mptcp_pm_parse_entry(attr, info, true, &addr);
	if (ret < 0)
		return ret;

	if (addr.addr.port && !address_use_port(&addr)) {
		GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port");
		return -EINVAL;
	}

	if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
	    addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
		GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh");
		return -EINVAL;
	}

	if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
		GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint");
		return -EINVAL;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
	if (!entry) {
		GENL_SET_ERR_MSG(info, "can't allocate addr");
		return -ENOMEM;
	}

	*entry = addr;
	if (entry->addr.port) {
		ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
		if (ret) {
			GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret);
			goto out_free;
		}
	}
	ret =
	      mptcp_pm_nl_append_new_local_addr(pernet, entry,
						!mptcp_pm_has_addr_attr_id(attr, info));
	if (ret < 0) {
		GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
		goto out_free;
	}

	mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
	return 0;

out_free:
	__mptcp_pm_release_addr_entry(entry);
	return ret;
}

int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
					    u8 *flags, int *ifindex)
{
	struct mptcp_pm_addr_entry *entry;
	struct sock *sk = (struct sock *)msk;
	struct net *net = sock_net(sk);

	/* No entries with ID 0 */
	if (id == 0)
		return 0;

	rcu_read_lock();
	entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
	if (entry) {
		*flags = entry->flags;
		*ifindex = entry->ifindex;
	}
	rcu_read_unlock();

	return 0;
}

static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
				      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *entry;

	entry = mptcp_pm_del_add_timer(msk, addr, false);
	if (entry) {
		list_del(&entry->list);
		kfree(entry);
		return true;
	}

	return false;
}

static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
				  const struct mptcp_addr_info *addr)
{
	return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
}

static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
				      const struct mptcp_addr_info *addr,
				      bool force)
{
	struct mptcp_rm_list list = { .nr = 0 };
	bool ret;

	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);

	ret = remove_anno_list_by_saddr(msk, addr);
	if (ret || force) {
		spin_lock_bh(&msk->pm.lock);
		if (ret) {
			__set_bit(addr->id, msk->pm.id_avail_bitmap);
			msk->pm.add_addr_signaled--;
		}
		mptcp_pm_remove_addr(msk, &list);
		spin_unlock_bh(&msk->pm.lock);
	}
	return ret;
}

static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
{
	/* If it was marked as used, and not ID 0, decrement local_addr_used */
	if (!__test_and_set_bit(id ?
				: msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
	    id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
		msk->pm.local_addr_used--;
}

static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
						   const struct mptcp_pm_addr_entry *entry)
{
	const struct mptcp_addr_info *addr = &entry->addr;
	struct mptcp_rm_list list = { .nr = 1 };
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;

	pr_debug("remove_id=%d\n", addr->id);

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;
		bool remove_subflow;

		if (mptcp_pm_is_userspace(msk))
			goto next;

		if (list_empty(&msk->conn_list)) {
			mptcp_pm_remove_anno_addr(msk, addr, false);
			goto next;
		}

		lock_sock(sk);
		remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
					  !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));

		list.ids[0] = mptcp_endp_get_local_id(msk, addr);
		if (remove_subflow) {
			spin_lock_bh(&msk->pm.lock);
			mptcp_pm_nl_rm_subflow_received(msk, &list);
			spin_unlock_bh(&msk->pm.lock);
		}

		if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
			spin_lock_bh(&msk->pm.lock);
			__mark_subflow_endp_available(msk, list.ids[0]);
			spin_unlock_bh(&msk->pm.lock);
		}

		if (msk->mpc_endpoint_id == entry->addr.id)
			msk->mpc_endpoint_id = 0;
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}

static int mptcp_nl_remove_id_zero_address(struct net *net,
					   struct mptcp_addr_info *addr)
{
	struct mptcp_rm_list list = { .nr = 0 };
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;

	list.ids[list.nr++] = 0;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;
		struct mptcp_addr_info msk_local;

		if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
			goto next;

		mptcp_local_address((struct sock_common *)msk, &msk_local);
		if (!mptcp_addresses_equal(&msk_local, addr, addr->port))
			goto next;

		lock_sock(sk);
		spin_lock_bh(&msk->pm.lock);
		mptcp_pm_remove_addr(msk, &list);
		mptcp_pm_nl_rm_subflow_received(msk, &list);
		__mark_subflow_endp_available(msk, 0);
		spin_unlock_bh(&msk->pm.lock);
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}

int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct mptcp_pm_addr_entry addr, *entry;
	unsigned int addr_max;
	int ret;

	ret = mptcp_pm_parse_entry(attr, info, false, &addr);
	if (ret < 0)
		return ret;

	/* the zero id address is special: the first address used by the msk
	 * always gets such an id, so different subflows can have different zero
	 * id addresses. Additionally zero id is not accounted for in id_bitmap.
	 * Let's use an 'mptcp_rm_list' instead of the common remove code.
	 */
	if (addr.addr.id == 0)
		return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr);

	spin_lock_bh(&pernet->lock);
	entry = __lookup_addr_by_id(pernet, addr.addr.id);
	if (!entry) {
		GENL_SET_ERR_MSG(info, "address not found");
		spin_unlock_bh(&pernet->lock);
		return -EINVAL;
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
		addr_max = pernet->add_addr_signal_max;
		WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
		addr_max = pernet->local_addr_max;
		WRITE_ONCE(pernet->local_addr_max, addr_max - 1);
	}

	pernet->addrs--;
	list_del_rcu(&entry->list);
	__clear_bit(entry->addr.id, pernet->id_bitmap);
	spin_unlock_bh(&pernet->lock);

	mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
	synchronize_rcu();
	__mptcp_pm_release_addr_entry(entry);

	return ret;
}

/* Called from the userspace PM only */
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
{
	struct mptcp_rm_list alist = { .nr = 0 };
	struct mptcp_pm_addr_entry *entry;
	int anno_nr = 0;

	list_for_each_entry(entry, rm_list, list) {
		if (alist.nr >= MPTCP_RM_IDS_MAX)
			break;

		/* only delete if either announced or matching a subflow */
		if (remove_anno_list_by_saddr(msk, &entry->addr))
			anno_nr++;
		else if (!lookup_subflow_by_saddr(&msk->conn_list,
						  &entry->addr))
			continue;

		alist.ids[alist.nr++] = entry->addr.id;
	}

	if (alist.nr) {
		spin_lock_bh(&msk->pm.lock);
		msk->pm.add_addr_signaled -= anno_nr;
		mptcp_pm_remove_addr(msk, &alist);
		spin_unlock_bh(&msk->pm.lock);
	}
}

/* Called from the in-kernel PM only */
static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
					       struct list_head *rm_list)
{
	struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
	struct mptcp_pm_addr_entry *entry;

	list_for_each_entry(entry, rm_list, list) {
		if (slist.nr < MPTCP_RM_IDS_MAX &&
		    lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
			slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);

		if (alist.nr < MPTCP_RM_IDS_MAX &&
		    remove_anno_list_by_saddr(msk, &entry->addr))
			alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
	}

	spin_lock_bh(&msk->pm.lock);
	if (alist.nr) {
		msk->pm.add_addr_signaled -= alist.nr;
		mptcp_pm_remove_addr(msk, &alist);
	}
	if (slist.nr)
		mptcp_pm_nl_rm_subflow_received(msk, &slist);
	/* Reset counters: maybe some subflows have been removed before */
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
	msk->pm.local_addr_used = 0;
	spin_unlock_bh(&msk->pm.lock);
}

static void mptcp_nl_remove_addrs_list(struct net *net,
				       struct list_head *rm_list)
{
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;

	if (list_empty(rm_list))
		return;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;

		if (!mptcp_pm_is_userspace(msk)) {
			lock_sock(sk);
			mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
			release_sock(sk);
		}

		sock_put(sk);
		cond_resched();
	}
}

/* caller must ensure the RCU grace period is already elapsed
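 * since concurrent RCU readers could still be walking the entries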
 */
static void __flush_addrs(struct list_head *list)
{
	while (!list_empty(list)) {
		struct mptcp_pm_addr_entry *cur;

		cur = list_entry(list->next,
				 struct mptcp_pm_addr_entry, list);
		list_del_rcu(&cur->list);
		__mptcp_pm_release_addr_entry(cur);
	}
}

static void __reset_counters(struct pm_nl_pernet *pernet)
{
	WRITE_ONCE(pernet->add_addr_signal_max, 0);
	WRITE_ONCE(pernet->add_addr_accept_max, 0);
	WRITE_ONCE(pernet->local_addr_max, 0);
	pernet->addrs = 0;
}

int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	LIST_HEAD(free_list);

	spin_lock_bh(&pernet->lock);
	list_splice_init(&pernet->local_addr_list, &free_list);
	__reset_counters(pernet);
	pernet->next_id = 1;
	bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
	spin_unlock_bh(&pernet->lock);
	mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
	synchronize_rcu();
	__flush_addrs(&free_list);
	return 0;
}

int mptcp_nl_fill_addr(struct sk_buff *skb,
		       struct mptcp_pm_addr_entry *entry)
{
	struct mptcp_addr_info *addr = &entry->addr;
	struct nlattr *attr;

	attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family))
		goto nla_put_failure;
	if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port)))
		goto nla_put_failure;
	if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
		goto nla_put_failure;
	if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
		goto nla_put_failure;
	if (entry->ifindex &&
	    nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
		goto nla_put_failure;

	if (addr->family == AF_INET &&
	    nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
			    addr->addr.s_addr))
		goto nla_put_failure;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6 &&
		 nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6))
		goto nla_put_failure;
#endif
	nla_nest_end(skb, attr);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, attr);
	return -EMSGSIZE;
}

int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct mptcp_pm_addr_entry addr, *entry;
	struct sk_buff *msg;
	void *reply;
	int ret;

	ret = mptcp_pm_parse_entry(attr, info, false, &addr);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
				  info->genlhdr->cmd);
	if (!reply) {
		GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_bh(&pernet->lock);
	entry = __lookup_addr_by_id(pernet, addr.addr.id);
	if (!entry) {
		GENL_SET_ERR_MSG(info, "address not found");
		ret = -EINVAL;
		goto unlock_fail;
	}

	ret = mptcp_nl_fill_addr(msg, entry);
	if (ret)
		goto unlock_fail;

	genlmsg_end(msg, reply);
	ret = genlmsg_reply(msg, info);
	spin_unlock_bh(&pernet->lock);
	return ret;

unlock_fail:
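	/* drop the pernet lock before freeing the partially built reply */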
	spin_unlock_bh(&pernet->lock);

fail:
	nlmsg_free(msg);
	return ret;
}

int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
	return mptcp_pm_get_addr(skb, info);
}

int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
			  struct netlink_callback *cb)
{
	struct net *net = sock_net(msg->sk);
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	int id = cb->args[0];
	void *hdr;
	int i;

	pernet = pm_nl_get_pernet(net);

	spin_lock_bh(&pernet->lock);
	for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
		if (test_bit(i, pernet->id_bitmap)) {
			entry = __lookup_addr_by_id(pernet, i);
			if (!entry)
				break;

			if (entry->addr.id <= id)
				continue;

			hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, &mptcp_genl_family,
					  NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
			if (!hdr)
				break;

			if (mptcp_nl_fill_addr(msg, entry) < 0) {
				genlmsg_cancel(msg, hdr);
				break;
			}

			id = entry->addr.id;
			genlmsg_end(msg, hdr);
		}
	}
	spin_unlock_bh(&pernet->lock);

	cb->args[0] = id;
	return msg->len;
}

int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
				struct netlink_callback *cb)
{
	return mptcp_pm_dump_addr(msg, cb);
}

static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
	struct nlattr *attr = info->attrs[id];

	if (!attr)
		return 0;

	*limit = nla_get_u32(attr);
	if (*limit > MPTCP_PM_ADDR_MAX) {
		GENL_SET_ERR_MSG(info, "limit greater than maximum");
		return -EINVAL;
	}
	return 0;
}

int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	unsigned int rcv_addrs, subflows;
	int ret;

	spin_lock_bh(&pernet->lock);
	rcv_addrs = pernet->add_addr_accept_max;
	ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
	if (ret)
		goto unlock;

	subflows = pernet->subflows_max;
	ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
	if (ret)
		goto unlock;

	WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
	WRITE_ONCE(pernet->subflows_max, subflows);

unlock:
	spin_unlock_bh(&pernet->lock);
	return ret;
}

int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct sk_buff *msg;
	void *reply;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
				  MPTCP_PM_CMD_GET_LIMITS);
	if (!reply)
		goto fail;

	if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
			READ_ONCE(pernet->add_addr_accept_max)))
		goto fail;

	if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
			READ_ONCE(pernet->subflows_max)))
		goto fail;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

fail:
	GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
	nlmsg_free(msg);
	return -EMSGSIZE;
}

static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
				 struct mptcp_addr_info *addr)
{
	struct mptcp_rm_list list = { .nr = 0 };

	list.ids[list.nr++] =

static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
				 struct mptcp_addr_info *addr)
{
	struct mptcp_rm_list list = { .nr = 0 };

	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, &list);
	__mark_subflow_endp_available(msk, list.ids[0]);
	mptcp_pm_create_subflow_or_signal_addr(msk);
	spin_unlock_bh(&msk->pm.lock);
}

static int mptcp_nl_set_flags(struct net *net,
			      struct mptcp_addr_info *addr,
			      u8 bkup, u8 changed)
{
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;
	int ret = -EINVAL;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;

		if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
			goto next;

		lock_sock(sk);
		if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
			ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup);
		if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)
			mptcp_pm_nl_fullmesh(msk, addr);
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return ret;
}
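
/* Note: the walk above visits every in-kernel-PM MPTCP socket in the
 * netns via the token iterator, so a single endpoint flag change fans
 * out to all established connections that may use that address;
 * cond_resched() keeps the loop preemptible when many sockets exist,
 * and userspace-PM sockets are deliberately skipped.
 */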

int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
{
	struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
	struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
	u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
			   MPTCP_PM_ADDR_FLAG_FULLMESH;
	struct net *net = sock_net(skb->sk);
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	u8 lookup_by_id = 0;
	u8 bkup = 0;
	int ret;

	pernet = pm_nl_get_pernet(net);

	ret = mptcp_pm_parse_entry(attr, info, false, &addr);
	if (ret < 0)
		return ret;

	if (addr.addr.family == AF_UNSPEC) {
		lookup_by_id = 1;
		if (!addr.addr.id) {
			GENL_SET_ERR_MSG(info, "missing required inputs");
			return -EOPNOTSUPP;
		}
	}

	if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
		bkup = 1;

	spin_lock_bh(&pernet->lock);
	entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
			       __lookup_addr(pernet, &addr.addr);
	if (!entry) {
		spin_unlock_bh(&pernet->lock);
		GENL_SET_ERR_MSG(info, "address not found");
		return -EINVAL;
	}
	if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
	    (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
		spin_unlock_bh(&pernet->lock);
		GENL_SET_ERR_MSG(info, "invalid addr flags");
		return -EINVAL;
	}

	changed = (addr.flags ^ entry->flags) & mask;
	entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
	addr = *entry;
	spin_unlock_bh(&pernet->lock);

	mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
	return 0;
}

int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
	return mptcp_pm_set_flags(skb, info);
}

static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
{
	genlmsg_multicast_netns(&mptcp_genl_family, net,
				nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
}

bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
{
	return genl_has_listeners(&mptcp_genl_family,
				  sock_net((const struct sock *)msk),
				  MPTCP_PM_EV_GRP_OFFSET);
}

static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
{
	const struct inet_sock *issk = inet_sk(ssk);
	const struct mptcp_subflow_context *sf;

	if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
		return -EMSGSIZE;

	switch (ssk->sk_family) {
	case AF_INET:
		if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
			return -EMSGSIZE;
		if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr))
			return -EMSGSIZE;
		break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	case AF_INET6: {
		const struct ipv6_pinfo *np = inet6_sk(ssk);

		if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
			return -EMSGSIZE;
		if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr))
			return -EMSGSIZE;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
		return -EMSGSIZE;
	}

	if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
		return -EMSGSIZE;
	if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport))
		return -EMSGSIZE;

	sf = mptcp_subflow_ctx(ssk);
	if (WARN_ON_ONCE(!sf))
		return -EINVAL;

	if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
		return -EMSGSIZE;

	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
		return -EMSGSIZE;

	return 0;
}
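
/* The MPTCP_ATTR_* set built above (family, addresses, ports, local and
 * remote IDs) is the per-subflow payload shared by the SUB_* events
 * below; "ip mptcp monitor" is one consumer that decodes it
 * (illustrative, any listener on the event multicast group will do).
 */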
static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
					 const struct mptcp_sock *msk,
					 const struct sock *ssk)
{
	const struct sock *sk = (const struct sock *)msk;
	const struct mptcp_subflow_context *sf;
	u8 sk_err;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
		return -EMSGSIZE;

	if (mptcp_event_add_subflow(skb, ssk))
		return -EMSGSIZE;

	sf = mptcp_subflow_ctx(ssk);
	if (WARN_ON_ONCE(!sf))
		return -EINVAL;

	if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup))
		return -EMSGSIZE;

	if (ssk->sk_bound_dev_if &&
	    nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
		return -EMSGSIZE;

	sk_err = READ_ONCE(ssk->sk_err);
	if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
	    nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
		return -EMSGSIZE;

	return 0;
}

static int mptcp_event_sub_established(struct sk_buff *skb,
				       const struct mptcp_sock *msk,
				       const struct sock *ssk)
{
	return mptcp_event_put_token_and_ssk(skb, msk, ssk);
}

static int mptcp_event_sub_closed(struct sk_buff *skb,
				  const struct mptcp_sock *msk,
				  const struct sock *ssk)
{
	const struct mptcp_subflow_context *sf;

	if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
		return -EMSGSIZE;

	sf = mptcp_subflow_ctx(ssk);
	if (!sf->reset_seen)
		return 0;

	if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason))
		return -EMSGSIZE;

	if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient))
		return -EMSGSIZE;

	return 0;
}

static int mptcp_event_created(struct sk_buff *skb,
			       const struct mptcp_sock *msk,
			       const struct sock *ssk)
{
	int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));

	if (err)
		return err;

	if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
		return -EMSGSIZE;

	return mptcp_event_add_subflow(skb, ssk);
}

void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
{
	struct net *net = sock_net((const struct sock *)msk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
	if (!nlh)
		goto nla_put_failure;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
		goto nla_put_failure;

	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
		goto nla_put_failure;

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_free(skb);
}
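
/* mptcp_event_addr_removed() above and mptcp_event_addr_announced()
 * below allocate with GFP_ATOMIC since they may run in contexts that
 * cannot sleep; a lost notification under memory pressure is tolerated
 * (the functions simply return) rather than blocking the data path.
 */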
void mptcp_event_addr_announced(const struct sock *ssk,
				const struct mptcp_addr_info *info)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct net *net = sock_net(ssk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0,
			  MPTCP_EVENT_ANNOUNCED);
	if (!nlh)
		goto nla_put_failure;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
		goto nla_put_failure;

	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
		goto nla_put_failure;

	if (nla_put_be16(skb, MPTCP_ATTR_DPORT,
			 info->port == 0 ?
			 inet_sk(ssk)->inet_dport :
			 info->port))
		goto nla_put_failure;

	switch (info->family) {
	case AF_INET:
		if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr))
			goto nla_put_failure;
		break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	case AF_INET6:
		if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6))
			goto nla_put_failure;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		goto nla_put_failure;
	}

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_free(skb);
}

void mptcp_event_pm_listener(const struct sock *ssk,
			     enum mptcp_event_type event)
{
	const struct inet_sock *issk = inet_sk(ssk);
	struct net *net = sock_net(ssk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event);
	if (!nlh)
		goto nla_put_failure;

	if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
		goto nla_put_failure;

	if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
		goto nla_put_failure;

	switch (ssk->sk_family) {
	case AF_INET:
		if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
			goto nla_put_failure;
		break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	case AF_INET6: {
		const struct ipv6_pinfo *np = inet6_sk(ssk);

		if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
			goto nla_put_failure;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
		goto nla_put_failure;
	}

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, GFP_KERNEL);
	return;

nla_put_failure:
	nlmsg_free(skb);
}
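
/* Listener created/closed notifications originate from process context,
 * so GFP_KERNEL is used above; the generic mptcp_event() below instead
 * takes gfp from the caller because it can be invoked from both process
 * and softirq context.
 */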
void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
		 const struct sock *ssk, gfp_t gfp)
{
	struct net *net = sock_net((const struct sock *)msk);
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
	if (!nlh)
		goto nla_put_failure;

	switch (type) {
	case MPTCP_EVENT_UNSPEC:
		WARN_ON_ONCE(1);
		break;
	case MPTCP_EVENT_CREATED:
	case MPTCP_EVENT_ESTABLISHED:
		if (mptcp_event_created(skb, msk, ssk) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_CLOSED:
		if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_ANNOUNCED:
	case MPTCP_EVENT_REMOVED:
		/* call mptcp_event_addr_announced()/removed instead */
		WARN_ON_ONCE(1);
		break;
	case MPTCP_EVENT_SUB_ESTABLISHED:
	case MPTCP_EVENT_SUB_PRIORITY:
		if (mptcp_event_sub_established(skb, msk, ssk) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_SUB_CLOSED:
		if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
			goto nla_put_failure;
		break;
	case MPTCP_EVENT_LISTENER_CREATED:
	case MPTCP_EVENT_LISTENER_CLOSED:
		break;
	}

	genlmsg_end(skb, nlh);
	mptcp_nl_mcast_send(net, skb, gfp);
	return;

nla_put_failure:
	nlmsg_free(skb);
}

struct genl_family mptcp_genl_family __ro_after_init = {
	.name		= MPTCP_PM_NAME,
	.version	= MPTCP_PM_VER,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= mptcp_pm_nl_ops,
	.n_ops		= ARRAY_SIZE(mptcp_pm_nl_ops),
	.resv_start_op	= MPTCP_PM_CMD_SUBFLOW_DESTROY + 1,
	.mcgrps		= mptcp_pm_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(mptcp_pm_mcgrps),
};

static int __net_init pm_nl_init_net(struct net *net)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

	INIT_LIST_HEAD_RCU(&pernet->local_addr_list);

	/* Cit. 2 subflows ought to be enough for anybody. */
	pernet->subflows_max = 2;
	pernet->next_id = 1;
	pernet->stale_loss_cnt = 4;
	spin_lock_init(&pernet->lock);

	/* No need to initialize other pernet fields, the struct is zeroed at
	 * allocation time.
	 */

	return 0;
}

static void __net_exit pm_nl_exit_net(struct list_head *net_list)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list) {
		struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

		/* net is removed from namespace list, can't race with
		 * other modifiers, also netns core already waited for a
		 * RCU grace period.
		 */
		__flush_addrs(&pernet->local_addr_list);
	}
}

static struct pernet_operations mptcp_pm_pernet_ops = {
	.init = pm_nl_init_net,
	.exit_batch = pm_nl_exit_net,
	.id = &pm_nl_pernet_id,
	.size = sizeof(struct pm_nl_pernet),
};

void __init mptcp_pm_nl_init(void)
{
	if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
		panic("Failed to register MPTCP PM pernet subsystem.\n");

	if (genl_register_family(&mptcp_genl_family))
		panic("Failed to register MPTCP PM netlink family\n");
}
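
/* Init ordering note: the pernet subsystem is registered before the genl
 * family above, so pm_nl_init_net() has already populated the per-netns
 * defaults (2 subflows, next endpoint id 1) by the time userspace can
 * issue requests against the family.
 */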