// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2025, Matthieu Baerts.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <net/netns/generic.h>

#include "protocol.h"
#include "mib.h"
#include "mptcp_pm_gen.h"

static int pm_nl_pernet_id;

/* Per-netns state of the in-kernel path manager: the configured endpoints
 * (endp_list), per-flag endpoint counters, the PM limits and the ID
 * allocation state. Updates are serialized by 'lock'; readers walk
 * endp_list under RCU and read the counters with READ_ONCE().
 */
struct pm_nl_pernet {
	/* protects pernet updates */
	spinlock_t lock;
	struct list_head endp_list;
	u8 endpoints;
	u8 endp_signal_max;
	u8 endp_subflow_max;
	u8 endp_laminar_max;
	u8 endp_fullmesh_max;
	u8 limit_add_addr_accepted;
	u8 limit_extra_subflows;
	u8 next_id;
	DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
};

/* Hard cap on the number of configured endpoints per netns */
#define MPTCP_PM_ADDR_MAX	8

static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
{
	return net_generic(net, pm_nl_pernet_id);
}

static struct pm_nl_pernet *
pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
{
	return pm_nl_get_pernet(sock_net((struct sock *)msk));
}

static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
{
	return pm_nl_get_pernet(genl_info_net(info));
}

u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk)
{
	const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->endp_signal_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_signal_max);

u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->endp_subflow_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_subflow_max);

u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->endp_laminar_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_laminar_max);

u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->endp_fullmesh_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_fullmesh_max);

u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->limit_add_addr_accepted);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_add_addr_accepted);

u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	return READ_ONCE(pernet->limit_extra_subflows);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_extra_subflows);

/* Return true if a subflow on 'list' in an alive TCP state (established or
 * still in the handshake) already points at the remote address 'daddr'.
 */
static bool lookup_subflow_by_daddr(const struct list_head *list,
				    const struct mptcp_addr_info *daddr)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_addr_info cur;

	list_for_each_entry(subflow, list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!((1 << inet_sk_state_load(ssk)) &
		      (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
			continue;

		mptcp_remote_address((struct sock_common *)ssk, &cur);
		if (mptcp_addresses_equal(&cur, daddr, daddr->port))
			return true;
	}

	return false;
}

/* Pick the first 'subflow'-flagged endpoint whose ID is still marked as
 * available for this msk, and copy it into 'new_local'. Returns false if no
 * such endpoint exists.
 */
static bool
select_local_address(const struct pm_nl_pernet *pernet,
		     const struct mptcp_sock *msk,
		     struct mptcp_pm_local *new_local)
{
	struct mptcp_pm_addr_entry *entry;
	bool found = false;

	msk_owned_by_me(msk);

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
			continue;

		if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
			continue;

		new_local->addr = entry->addr;
		new_local->flags = entry->flags;
		new_local->ifindex = entry->ifindex;
		found = true;
		break;
	}
	rcu_read_unlock();

	return found;
}

/* Pick the first 'signal'-flagged endpoint whose ID is still available for
 * this msk, to be announced via ADD_ADDR. Returns false if none is left.
 */
static bool
select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
		      struct mptcp_pm_local *new_local)
{
	struct mptcp_pm_addr_entry *entry;
	bool found = false;

	rcu_read_lock();
	/* do not keep any additional per socket state, just signal
	 * the address list in order.
	 * Note: removal from the local address list during the msk life-cycle
	 * can lead to additional addresses not being announced.
	 */
	list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
		if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
			continue;

		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
			continue;

		new_local->addr = entry->addr;
		new_local->flags = entry->flags;
		new_local->ifindex = entry->ifindex;
		found = true;
		break;
	}
	rcu_read_unlock();

	return found;
}

/* Fill 'addrs[0]' with the remote address of the initial subflow, unless the
 * peer denied joins to ID 0 or the families don't match. Returns the number
 * of entries written (0 or 1) and accounts the extra subflow on success.
 */
static unsigned int
fill_remote_addr(struct mptcp_sock *msk, struct mptcp_addr_info *local,
		 struct mptcp_addr_info *addrs)
{
	bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
	struct mptcp_addr_info remote = { 0 };
	struct sock *sk = (struct sock *)msk;

	if (deny_id0)
		return 0;

	mptcp_remote_address((struct sock_common *)sk, &remote);

	if (!mptcp_pm_addr_families_match(sk, local, &remote))
		return 0;

	msk->pm.extra_subflows++;
	*addrs = remote;

	return 1;
}

/* Fullmesh case: collect, per remote ID, one remote address taken from the
 * existing subflows, skipping remotes already connected from this local ID,
 * until the extra-subflow limit is hit. Returns the number of entries
 * written into 'addrs'.
 */
static unsigned int
fill_remote_addresses_fullmesh(struct mptcp_sock *msk,
			       struct mptcp_addr_info *local,
			       struct mptcp_addr_info *addrs)
{
	u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
	bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
	DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
	struct sock *sk = (struct sock *)msk, *ssk;
	struct mptcp_subflow_context *subflow;
	int i = 0;

	/* Forbid creation of new subflows matching existing ones, possibly
	 * already created by incoming ADD_ADDR
	 */
	bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
	mptcp_for_each_subflow(msk, subflow)
		if (READ_ONCE(subflow->local_id) == local->id)
			__set_bit(subflow->remote_id, unavail_id);

	mptcp_for_each_subflow(msk, subflow) {
		ssk = mptcp_subflow_tcp_sock(subflow);
		mptcp_remote_address((struct sock_common *)ssk, &addrs[i]);
		addrs[i].id = READ_ONCE(subflow->remote_id);
		if (deny_id0 && !addrs[i].id)
			continue;

		if (test_bit(addrs[i].id, unavail_id))
			continue;

		if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
			continue;

		/* forbid creating multiple address towards this id */
		__set_bit(addrs[i].id, unavail_id);
		msk->pm.extra_subflows++;
		i++;

		if (msk->pm.extra_subflows >= limit_extra_subflows)
			break;
	}

	return i;
}

/* Fill all the remote addresses into the array addrs[],
 * and return the array size.
 */
static unsigned int
fill_remote_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *local,
			  bool fullmesh, struct mptcp_addr_info *addrs)
{
	/* Non-fullmesh: fill in the single entry corresponding to the primary
	 * MPC subflow remote address, and return 1, corresponding to 1 entry.
	 */
	if (!fullmesh)
		return fill_remote_addr(msk, local, addrs);

	/* Fullmesh endpoint: fill all possible remote addresses */
	return fill_remote_addresses_fullmesh(msk, local, addrs);
}

/* Find the endpoint entry with the given ID; endp_list is walked with the
 * RCU primitive, legal either under rcu_read_lock() or with pernet->lock
 * held (see lockdep_is_held below).
 */
static struct mptcp_pm_addr_entry *
__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
{
	struct mptcp_pm_addr_entry *entry;

	list_for_each_entry_rcu(entry, &pernet->endp_list, list,
				lockdep_is_held(&pernet->lock)) {
		if (entry->addr.id == id)
			return entry;
	}
	return NULL;
}

/* Find the endpoint entry matching the given address; the port is only
 * compared when the endpoint itself has one set.
 */
static struct mptcp_pm_addr_entry *
__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
	struct mptcp_pm_addr_entry *entry;

	list_for_each_entry_rcu(entry, &pernet->endp_list, list,
				lockdep_is_held(&pernet->lock)) {
		if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
			return entry;
	}
	return NULL;
}

/* Map an endpoint ID to the on-the-wire local ID: the endpoint attached to
 * the initial (MPC) subflow is always advertised as ID 0.
 */
static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
				  const struct mptcp_addr_info *addr)
{
	return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
}
/* Set mpc_endpoint_id, and send MP_PRIO for ID0 if needed */
static void mptcp_mpc_endpoint_setup(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_pm_addr_entry *entry;
	struct mptcp_addr_info mpc_addr;
	struct pm_nl_pernet *pernet;
	bool backup = false;

	/* do lazy endpoint usage accounting for the MPC subflows */
	if (likely(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED)) ||
	    !msk->first)
		return;

	subflow = mptcp_subflow_ctx(msk->first);
	pernet = pm_nl_get_pernet_from_msk(msk);

	/* If the local address of the initial subflow matches a configured
	 * endpoint, consume its ID and remember it as the MPC endpoint.
	 */
	mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
	rcu_read_lock();
	entry = __lookup_addr(pernet, &mpc_addr);
	if (entry) {
		__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
		msk->mpc_endpoint_id = entry->addr.id;
		backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	}
	rcu_read_unlock();

	/* Send MP_PRIO */
	if (backup)
		mptcp_pm_send_ack(msk, subflow, true, backup);

	msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
}

/* Main worker of the in-kernel PM: announce pending 'signal' endpoints and
 * create new subflows from 'subflow' endpoints, within the configured
 * limits. Called with msk->pm.lock held; it is temporarily released around
 * __mptcp_subflow_connect().
 */
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
	u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
	u8 endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk);
	u8 endp_signal_max = mptcp_pm_get_endp_signal_max(msk);
	struct sock *sk = (struct sock *)msk;
	bool signal_and_subflow = false;
	struct mptcp_pm_local local;

	mptcp_mpc_endpoint_setup(msk);
	if (!mptcp_is_fully_established(sk))
		return;

	pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
		 msk->pm.local_addr_used, endp_subflow_max,
		 msk->pm.add_addr_signaled, endp_signal_max,
		 msk->pm.extra_subflows, limit_extra_subflows);

	/* check first for announce */
	if (msk->pm.add_addr_signaled < endp_signal_max) {
		/* due to racing events on both ends we can reach here while
		 * previous add address is still running: if we invoke now
		 * mptcp_pm_announce_addr(), that will fail and the
		 * corresponding id will be marked as used.
		 * Instead let the PM machinery reschedule us when the
		 * current address announce will be completed.
		 */
		if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
			return;

		if (!select_signal_address(pernet, msk, &local))
			goto subflow;

		/* If the alloc fails, we are on memory pressure, not worth
		 * continuing, and trying to create subflows.
		 */
		if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
			return;

		__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
		msk->pm.add_addr_signaled++;

		/* Special case for ID0: set the correct ID */
		if (local.addr.id == msk->mpc_endpoint_id)
			local.addr.id = 0;

		mptcp_pm_announce_addr(msk, &local.addr, false);
		mptcp_pm_addr_send_ack(msk);

		/* An endpoint carrying both flags is also used for subflow
		 * creation right below, without re-selecting it.
		 */
		if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
			signal_and_subflow = true;
	}

subflow:
	/* No need to try establishing subflows to remote id0 if not allowed */
	if (mptcp_pm_add_addr_c_flag_case(msk))
		goto exit;

	/* check if should create a new subflow */
	while (msk->pm.local_addr_used < endp_subflow_max &&
	       msk->pm.extra_subflows < limit_extra_subflows) {
		struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
		bool fullmesh;
		int i, nr;

		if (signal_and_subflow)
			signal_and_subflow = false;
		else if (!select_local_address(pernet, msk, &local))
			break;

		fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);

		__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);

		/* Special case for ID0: set the correct ID */
		if (local.addr.id == msk->mpc_endpoint_id)
			local.addr.id = 0;
		else /* local_addr_used is not decr for ID 0 */
			msk->pm.local_addr_used++;

		nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
		if (nr == 0)
			continue;

		spin_unlock_bh(&msk->pm.lock);
		for (i = 0; i < nr; i++)
			__mptcp_subflow_connect(sk, &local, &addrs[i]);
		spin_lock_bh(&msk->pm.lock);
	}

exit:
	/* If an endpoint has both the signal and subflow flags, but it is not
	 * possible to create subflows -- the 'while' loop body above never
	 * executed -- then still mark the endp as used, which is somehow the
	 * case. This avoids issues later when removing the endpoint and calling
	 * __mark_subflow_endp_available(), which expects the increment here.
	 */
	if (signal_and_subflow && local.addr.id != msk->mpc_endpoint_id)
		msk->pm.local_addr_used++;

	mptcp_pm_nl_check_work_pending(msk);
}

static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
{
	mptcp_pm_create_subflow_or_signal_addr(msk);
}

static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
{
	mptcp_pm_create_subflow_or_signal_addr(msk);
}

/* Fullmesh case for an incoming ADD_ADDR: collect every fullmesh-flagged
 * endpoint whose family matches 'remote', up to the extra-subflow limit.
 * When the peer set the C flag (c_flag_case), subflow-flagged endpoints are
 * also accounted as used. Returns the number of entries written to 'locals'.
 */
static unsigned int
fill_local_addresses_vec_fullmesh(struct mptcp_sock *msk,
				  struct mptcp_addr_info *remote,
				  struct mptcp_pm_local *locals,
				  bool c_flag_case)
{
	u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
	struct sock *sk = (struct sock *)msk;
	struct mptcp_pm_addr_entry *entry;
	struct mptcp_pm_local *local;
	int i = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
		bool is_id0;

		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
			continue;

		if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
			continue;

		local = &locals[i];
		local->addr = entry->addr;
		local->flags = entry->flags;
		local->ifindex = entry->ifindex;

		is_id0 = local->addr.id == msk->mpc_endpoint_id;

		if (c_flag_case &&
		    (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) {
			__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);

			if (!is_id0)
				msk->pm.local_addr_used++;
		}

		/* Special case for ID0: set the correct ID */
		if (is_id0)
			local->addr.id = 0;

		msk->pm.extra_subflows++;
		i++;

		if (msk->pm.extra_subflows >= limit_extra_subflows)
			break;
	}
	rcu_read_unlock();

	return i;
}
/* Laminar case for an incoming ADD_ADDR: pick at most one laminar-flagged
 * endpoint whose (local) ID is not already used by an alive subflow, and
 * whose family matches 'remote'. Returns 0 or 1 entries written to 'locals'.
 */
static unsigned int
fill_local_laminar_endp(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
			struct mptcp_pm_local *locals)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
	DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct mptcp_pm_addr_entry *entry;
	struct mptcp_pm_local *local;
	int found = 0;

	/* Forbid creation of new subflows matching existing ones, possibly
	 * already created by 'subflow' endpoints
	 */
	bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* ignore subflows that are being closed */
		if ((1 << inet_sk_state_load(ssk)) &
		    (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING |
		     TCPF_CLOSE))
			continue;

		__set_bit(subflow_get_local_id(subflow), unavail_id);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR))
			continue;

		if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
			continue;

		if (test_bit(mptcp_endp_get_local_id(msk, &entry->addr),
			     unavail_id))
			continue;

		local = &locals[0];
		local->addr = entry->addr;
		local->flags = entry->flags;
		local->ifindex = entry->ifindex;

		if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
			__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);

			if (local->addr.id != msk->mpc_endpoint_id)
				msk->pm.local_addr_used++;
		}

		msk->pm.extra_subflows++;
		found = 1;
		break;
	}
	rcu_read_unlock();

	return found;
}

/* C-flag case for an incoming ADD_ADDR: walk the available 'subflow'
 * endpoints (skipping the MPC one and family mismatches) until either the
 * endpoint or the extra-subflow limit is reached. Returns the number of
 * entries written to 'locals'.
 */
static unsigned int
fill_local_addresses_vec_c_flag(struct mptcp_sock *msk,
				struct mptcp_addr_info *remote,
				struct mptcp_pm_local *locals)
{
	u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
	u8 endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk);
	struct sock *sk = (struct sock *)msk;
	struct mptcp_pm_local *local;
	int i = 0;

	while (msk->pm.local_addr_used < endp_subflow_max) {
		local = &locals[i];

		if (!select_local_address(pernet, msk, local))
			break;

		/* clearing the bit guarantees forward progress: the next
		 * select_local_address() call cannot pick this ID again
		 */
		__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);

		if (!mptcp_pm_addr_families_match(sk, &local->addr, remote))
			continue;

		if (local->addr.id == msk->mpc_endpoint_id)
			continue;

		msk->pm.local_addr_used++;
		msk->pm.extra_subflows++;
		i++;

		if (msk->pm.extra_subflows >= limit_extra_subflows)
			break;
	}

	return i;
}

/* Default case: fill one zeroed local address ('any'), letting routing pick
 * the source; the family mirrors the remote one (a v4-mapped v6 remote maps
 * to AF_INET). Returns 0 or 1 and accounts the extra subflow on success.
 */
static unsigned int
fill_local_address_any(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
		       struct mptcp_pm_local *local)
{
	struct sock *sk = (struct sock *)msk;

	memset(local, 0, sizeof(*local));
	local->addr.family =
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		remote->family == AF_INET6 &&
		ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
#endif
		remote->family;

	if (!mptcp_pm_addr_families_match(sk, &local->addr, remote))
		return 0;

	msk->pm.extra_subflows++;

	return 1;
}

/* Fill all the local addresses into the array addrs[],
 * and return the array size.
 */
static unsigned int
fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
			 struct mptcp_pm_local *locals)
{
	bool c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);

	/* If there is at least one MPTCP endpoint with a fullmesh flag */
	if (mptcp_pm_get_endp_fullmesh_max(msk))
		return fill_local_addresses_vec_fullmesh(msk, remote, locals,
							 c_flag_case);

	/* If there is at least one MPTCP endpoint with a laminar flag */
	if (mptcp_pm_get_endp_laminar_max(msk))
		return fill_local_laminar_endp(msk, remote, locals);

	/* Special case: peer sets the C flag, accept one ADD_ADDR if default
	 * limits are used -- accepting no ADD_ADDR -- and use subflow endpoints
	 */
	if (c_flag_case)
		return fill_local_addresses_vec_c_flag(msk, remote, locals);

	/* No special case: fill in the single 'IPADDRANY' local address */
	return fill_local_address_any(msk, remote, &locals[0]);
}

/* Handle an ADD_ADDR received from the peer: echo it, then try to connect
 * to the announced address from the selected local addresses. Called with
 * msk->pm.lock held; it is released around __mptcp_subflow_connect().
 */
static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
{
	u8 limit_add_addr_accepted = mptcp_pm_get_limit_add_addr_accepted(msk);
	u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
	struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX];
	struct sock *sk = (struct sock *)msk;
	struct mptcp_addr_info remote;
	bool sf_created = false;
	int i, nr;

	pr_debug("accepted %d:%d remote family %d\n",
		 msk->pm.add_addr_accepted, limit_add_addr_accepted,
		 msk->pm.remote.family);

	remote = msk->pm.remote;
	mptcp_pm_announce_addr(msk, &remote, true);
	mptcp_pm_addr_send_ack(msk);
	mptcp_mpc_endpoint_setup(msk);

	if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
		return;

	/* pick id 0 port, if none is provided the remote address */
	if (!remote.port)
		remote.port = sk->sk_dport;

	/* connect to the specified remote address, using whatever
	 * local address the routing configuration will pick.
	 */
	nr = fill_local_addresses_vec(msk, &remote, locals);
	if (nr == 0)
		return;

	spin_unlock_bh(&msk->pm.lock);
	for (i = 0; i < nr; i++)
		if (__mptcp_subflow_connect(sk, &locals[i], &remote) == 0)
			sf_created = true;
	spin_lock_bh(&msk->pm.lock);

	if (sf_created) {
		/* add_addr_accepted is not decr for ID 0 */
		if (remote.id)
			msk->pm.add_addr_accepted++;
		if (msk->pm.add_addr_accepted >= limit_add_addr_accepted ||
		    msk->pm.extra_subflows >= limit_extra_subflows)
			WRITE_ONCE(msk->pm.accept_addr, false);
	}
}

/* Account the removal of a previously accepted ADD_ADDR: re-enable
 * accept_addr when the count drops back below the limit.
 */
void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
{
	if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
		u8 limit_add_addr_accepted =
			mptcp_pm_get_limit_add_addr_accepted(msk);

		/* Note: if the subflow has been closed before, this
		 * add_addr_accepted counter will not be decremented.
		 */
		if (--msk->pm.add_addr_accepted < limit_add_addr_accepted)
			WRITE_ONCE(msk->pm.accept_addr, true);
	}
}

/* An endpoint keeps its port only when flagged 'signal' and not 'subflow' */
static bool address_use_port(struct mptcp_pm_addr_entry *entry)
{
	return (entry->flags &
		(MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
		MPTCP_PM_ADDR_FLAG_SIGNAL;
}

/* caller must ensure the RCU grace period is already elapsed */
static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
{
	if (entry->lsk)
		sock_release(entry->lsk);
	kfree(entry);
}

/* Insert 'entry' into the pernet endpoint list, allocating an ID when the
 * caller did not provide one. Returns the (positive) endpoint ID on success
 * or a negative errno. On success the list owns 'entry'; on failure the
 * caller keeps ownership, except in the !replace duplicate case where
 * 'entry' is freed here and the existing ID is returned.
 */
static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
					     struct mptcp_pm_addr_entry *entry,
					     bool replace)
{
	struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
	int ret = -EINVAL;
	u8 addr_max;

	spin_lock_bh(&pernet->lock);
	/* to keep the code simple, don't do IDR-like allocation for address ID,
	 * just bail when we exceed limits
	 */
	if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
		pernet->next_id = 1;
	if (pernet->endpoints >= MPTCP_PM_ADDR_MAX) {
		ret = -ERANGE;
		goto out;
	}
	if (test_bit(entry->addr.id, pernet->id_bitmap)) {
		ret = -EBUSY;
		goto out;
	}

	/* do not insert duplicate address, differentiate on port only
	 * singled addresses
	 */
	if (!address_use_port(entry))
		entry->addr.port = 0;
	list_for_each_entry(cur, &pernet->endp_list, list) {
		if (mptcp_addresses_equal(&cur->addr, &entry->addr,
					  cur->addr.port || entry->addr.port)) {
			/* allow replacing the exiting endpoint only if such
			 * endpoint is an implicit one and the user-space
			 * did not provide an endpoint id
			 */
			if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) {
				ret = -EEXIST;
				goto out;
			}
			if (entry->addr.id)
				goto out;

			/* allow callers that only need to look up the local
			 * addr's id to skip replacement. This allows them to
			 * avoid calling synchronize_rcu in the packet recv
			 * path.
			 */
			if (!replace) {
				kfree(entry);
				ret = cur->addr.id;
				goto out;
			}

			pernet->endpoints--;
			entry->addr.id = cur->addr.id;
			list_del_rcu(&cur->list);
			del_entry = cur;
			break;
		}
	}

	if (!entry->addr.id) {
find_next:
		entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
						    MPTCP_PM_MAX_ADDR_ID + 1,
						    pernet->next_id);
		/* wrap around once if nothing was free above next_id */
		if (!entry->addr.id && pernet->next_id != 1) {
			pernet->next_id = 1;
			goto find_next;
		}
	}

	if (!entry->addr.id)
		goto out;

	__set_bit(entry->addr.id, pernet->id_bitmap);
	if (entry->addr.id > pernet->next_id)
		pernet->next_id = entry->addr.id;

	/* the per-flag maxima are read locklessly, hence WRITE_ONCE */
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
		addr_max = pernet->endp_signal_max;
		WRITE_ONCE(pernet->endp_signal_max, addr_max + 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
		addr_max = pernet->endp_subflow_max;
		WRITE_ONCE(pernet->endp_subflow_max, addr_max + 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR) {
		addr_max = pernet->endp_laminar_max;
		WRITE_ONCE(pernet->endp_laminar_max, addr_max + 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
		addr_max = pernet->endp_fullmesh_max;
		WRITE_ONCE(pernet->endp_fullmesh_max, addr_max + 1);
	}

	pernet->endpoints++;
	if (!entry->addr.port)
		list_add_tail_rcu(&entry->list, &pernet->endp_list);
	else
		list_add_rcu(&entry->list, &pernet->endp_list);
	ret = entry->addr.id;

out:
	spin_unlock_bh(&pernet->lock);

	/* just replaced an existing entry, free it */
	if (del_entry) {
		synchronize_rcu();
		__mptcp_pm_release_addr_entry(del_entry);
	}
	return ret;
}
/* separate lockdep classes for v4/v6 PM listener msk socks, see below */
static struct lock_class_key mptcp_slock_keys[2];
static struct lock_class_key mptcp_keys[2];

/* Create, bind and listen on the in-kernel MPTCP socket backing a
 * port-based 'signal' endpoint; the resulting socket is stored in
 * entry->lsk (released by __mptcp_pm_release_addr_entry()). Returns 0 on
 * success or a negative errno.
 */
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
					    struct mptcp_pm_addr_entry *entry)
{
	bool is_ipv6 = entry->addr.family == AF_INET6;
	int addrlen = sizeof(struct sockaddr_in);
	struct sockaddr_storage addr;
	struct sock *newsk, *ssk;
	int backlog = 1024;
	int err;

	err = sock_create_kern(sock_net(sk), entry->addr.family,
			       SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk);
	if (err)
		return err;

	newsk = entry->lsk->sk;
	if (!newsk)
		return -EINVAL;

	/* The subflow socket lock is acquired in a nested to the msk one
	 * in several places, even by the TCP stack, and this msk is a kernel
	 * socket: lockdep complains. Instead of propagating the _nested
	 * modifiers in several places, re-init the lock class for the msk
	 * socket to an mptcp specific one.
	 */
	sock_lock_init_class_and_name(newsk,
				      is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
				      &mptcp_slock_keys[is_ipv6],
				      is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
				      &mptcp_keys[is_ipv6]);

	lock_sock(newsk);
	ssk = __mptcp_nmpc_sk(mptcp_sk(newsk));
	release_sock(newsk);
	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (entry->addr.family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	if (ssk->sk_family == AF_INET)
		err = inet_bind_sk(ssk, (struct sockaddr_unsized *)&addr, addrlen);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ssk->sk_family == AF_INET6)
		err = inet6_bind_sk(ssk, (struct sockaddr_unsized *)&addr, addrlen);
#endif
	if (err)
		return err;

	/* We don't use mptcp_set_state() here because it needs to be called
	 * under the msk socket lock. For the moment, that will not bring
	 * anything more than only calling inet_sk_state_store(), because the
	 * old status is known (TCP_CLOSE).
	 */
	inet_sk_state_store(newsk, TCP_LISTEN);
	lock_sock(ssk);
	WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true);
	err = __inet_listen_sk(ssk, backlog);
	if (!err)
		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
	release_sock(ssk);
	return err;
}

/* Return the endpoint ID matching skc's address, creating an implicit
 * (port-less) endpoint when none exists yet. Returns a negative errno on
 * allocation/insertion failure.
 */
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk,
			     struct mptcp_pm_addr_entry *skc)
{
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	int ret;

	pernet = pm_nl_get_pernet_from_msk(msk);

	rcu_read_lock();
	entry = __lookup_addr(pernet, &skc->addr);
	ret = entry ? entry->addr.id : -1;
	rcu_read_unlock();
	if (ret >= 0)
		return ret;

	/* address not found, add to local list */
	entry = kmemdup(skc, sizeof(*skc), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->addr.port = 0;
	/* replace=false: on duplicate, the existing ID is returned and
	 * 'entry' is freed by the callee; a costly synchronize_rcu() is
	 * avoided on this packet-recv path
	 */
	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, false);
	if (ret < 0)
		kfree(entry);

	return ret;
}
/* Return true if the endpoint matching skc carries the 'backup' flag */
bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
	struct mptcp_pm_addr_entry *entry;
	bool backup;

	rcu_read_lock();
	entry = __lookup_addr(pernet, skc);
	backup = entry && !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	rcu_read_unlock();

	return backup;
}

/* After a new endpoint was added, walk every established in-kernel-PM msk
 * in 'net' and let it try to use the new endpoint (announce or new
 * subflows).
 */
static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
					       struct mptcp_addr_info *addr)
{
	struct mptcp_sock *msk;
	long s_slot = 0, s_num = 0;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;
		struct mptcp_addr_info mpc_addr;

		if (!READ_ONCE(msk->fully_established) ||
		    mptcp_pm_is_userspace(msk))
			goto next;

		/* if the endp linked to the init sf is re-added with a != ID */
		mptcp_local_address((struct sock_common *)msk, &mpc_addr);

		lock_sock(sk);
		spin_lock_bh(&msk->pm.lock);
		if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
			msk->mpc_endpoint_id = addr->id;
		mptcp_pm_create_subflow_or_signal_addr(msk);
		spin_unlock_bh(&msk->pm.lock);
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}

/* Add an MPTCP endpoint */
int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct mptcp_pm_addr_entry addr, *entry;
	struct nlattr *attr;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR))
		return -EINVAL;

	attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
	ret = mptcp_pm_parse_entry(attr, info, true, &addr);
	if (ret < 0)
		return ret;

	/* reject invalid flag combinations before allocating anything */
	if (addr.addr.port && !address_use_port(&addr)) {
		NL_SET_ERR_MSG_ATTR(info->extack, attr,
				    "flags must have signal and not subflow when using port");
		return -EINVAL;
	}

	if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
	    addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
		NL_SET_ERR_MSG_ATTR(info->extack, attr,
				    "flags mustn't have both signal and fullmesh");
		return -EINVAL;
	}

	if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
		NL_SET_ERR_MSG_ATTR(info->extack, attr,
				    "can't create IMPLICIT endpoint");
		return -EINVAL;
	}

	entry = kmemdup(&addr, sizeof(addr), GFP_KERNEL_ACCOUNT);
	if (!entry) {
		GENL_SET_ERR_MSG(info, "can't allocate addr");
		return -ENOMEM;
	}

	if (entry->addr.port) {
		ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
		if (ret) {
			GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret);
			goto out_free;
		}
	}
	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
	if (ret < 0) {
		GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
		goto out_free;
	}

	mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
	return 0;

out_free:
	/* also releases entry->lsk, if any */
	__mptcp_pm_release_addr_entry(entry);
	return ret;
}

/* Withdraw a previously announced address: send RM_ADDR if it was announced
 * (or if 'force'), and drop the announce-list entry.
 */
static void mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
				      const struct mptcp_addr_info *addr,
				      bool force)
{
	struct mptcp_rm_list list = { .nr = 0 };
	bool announced;

	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);

	announced = mptcp_remove_anno_list_by_saddr(msk, addr);
	if (announced || force) {
		spin_lock_bh(&msk->pm.lock);
		if (announced)
			msk->pm.add_addr_signaled--;
		mptcp_pm_remove_addr(msk, &list);
		spin_unlock_bh(&msk->pm.lock);
	}
}

static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
{
	/* If it was marked as used, and not ID 0, decrement local_addr_used */
	if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
	    id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
		msk->pm.local_addr_used--;
}

/* After an endpoint was deleted, walk every in-kernel-PM msk in 'net',
 * withdraw the matching announce and close the matching subflows, then mark
 * the endpoint ID as available again.
 */
static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
						   const struct mptcp_pm_addr_entry *entry)
{
	const struct mptcp_addr_info *addr = &entry->addr;
	struct mptcp_rm_list list = { .nr = 1 };
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;

	pr_debug("remove_id=%d\n", addr->id);

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;
		bool remove_subflow;

		if (mptcp_pm_is_userspace(msk))
			goto next;

		lock_sock(sk);
		remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr);
		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
					  !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));

		list.ids[0] = mptcp_endp_get_local_id(msk, addr);

		spin_lock_bh(&msk->pm.lock);
		if (remove_subflow)
			mptcp_pm_rm_subflow(msk, &list);
		if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
			__mark_subflow_endp_available(msk, list.ids[0]);
		else /* mark endp ID as available, e.g. Signal or MPC endp */
			__set_bit(addr->id, msk->pm.id_avail_bitmap);
		spin_unlock_bh(&msk->pm.lock);

		if (msk->mpc_endpoint_id == entry->addr.id)
			msk->mpc_endpoint_id = 0;
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}
Signal or MPC endp */ 1099 __set_bit(addr->id, msk->pm.id_avail_bitmap); 1100 spin_unlock_bh(&msk->pm.lock); 1101 1102 if (msk->mpc_endpoint_id == entry->addr.id) 1103 msk->mpc_endpoint_id = 0; 1104 release_sock(sk); 1105 1106 next: 1107 sock_put(sk); 1108 cond_resched(); 1109 } 1110 1111 return 0; 1112 } 1113 1114 static int mptcp_nl_remove_id_zero_address(struct net *net, 1115 struct mptcp_addr_info *addr) 1116 { 1117 struct mptcp_rm_list list = { .nr = 0 }; 1118 long s_slot = 0, s_num = 0; 1119 struct mptcp_sock *msk; 1120 1121 list.ids[list.nr++] = 0; 1122 1123 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1124 struct sock *sk = (struct sock *)msk; 1125 struct mptcp_addr_info msk_local; 1126 1127 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) 1128 goto next; 1129 1130 mptcp_local_address((struct sock_common *)msk, &msk_local); 1131 if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) 1132 goto next; 1133 1134 lock_sock(sk); 1135 spin_lock_bh(&msk->pm.lock); 1136 mptcp_pm_remove_addr(msk, &list); 1137 mptcp_pm_rm_subflow(msk, &list); 1138 __mark_subflow_endp_available(msk, 0); 1139 spin_unlock_bh(&msk->pm.lock); 1140 release_sock(sk); 1141 1142 next: 1143 sock_put(sk); 1144 cond_resched(); 1145 } 1146 1147 return 0; 1148 } 1149 1150 /* Remove an MPTCP endpoint */ 1151 int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) 1152 { 1153 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1154 struct mptcp_pm_addr_entry addr, *entry; 1155 struct nlattr *attr; 1156 u8 addr_max; 1157 int ret; 1158 1159 if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR)) 1160 return -EINVAL; 1161 1162 attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1163 ret = mptcp_pm_parse_entry(attr, info, false, &addr); 1164 if (ret < 0) 1165 return ret; 1166 1167 /* the zero id address is special: the first address used by the msk 1168 * always gets such an id, so different subflows can have different zero 1169 * id addresses. 
Additionally zero id is not accounted for in id_bitmap. 1170 * Let's use an 'mptcp_rm_list' instead of the common remove code. 1171 */ 1172 if (addr.addr.id == 0) 1173 return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr); 1174 1175 spin_lock_bh(&pernet->lock); 1176 entry = __lookup_addr_by_id(pernet, addr.addr.id); 1177 if (!entry) { 1178 NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found"); 1179 spin_unlock_bh(&pernet->lock); 1180 return -EINVAL; 1181 } 1182 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { 1183 addr_max = pernet->endp_signal_max; 1184 WRITE_ONCE(pernet->endp_signal_max, addr_max - 1); 1185 } 1186 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 1187 addr_max = pernet->endp_subflow_max; 1188 WRITE_ONCE(pernet->endp_subflow_max, addr_max - 1); 1189 } 1190 if (entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR) { 1191 addr_max = pernet->endp_laminar_max; 1192 WRITE_ONCE(pernet->endp_laminar_max, addr_max - 1); 1193 } 1194 if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { 1195 addr_max = pernet->endp_fullmesh_max; 1196 WRITE_ONCE(pernet->endp_fullmesh_max, addr_max - 1); 1197 } 1198 1199 pernet->endpoints--; 1200 list_del_rcu(&entry->list); 1201 __clear_bit(entry->addr.id, pernet->id_bitmap); 1202 spin_unlock_bh(&pernet->lock); 1203 1204 mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); 1205 synchronize_rcu(); 1206 __mptcp_pm_release_addr_entry(entry); 1207 1208 return ret; 1209 } 1210 1211 static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk, 1212 struct list_head *rm_list) 1213 { 1214 struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; 1215 struct mptcp_pm_addr_entry *entry; 1216 1217 list_for_each_entry(entry, rm_list, list) { 1218 if (slist.nr < MPTCP_RM_IDS_MAX && 1219 mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) 1220 slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1221 1222 if (alist.nr < MPTCP_RM_IDS_MAX && 1223 mptcp_remove_anno_list_by_saddr(msk, 
&entry->addr)) 1224 alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1225 } 1226 1227 spin_lock_bh(&msk->pm.lock); 1228 if (alist.nr) { 1229 msk->pm.add_addr_signaled -= alist.nr; 1230 mptcp_pm_remove_addr(msk, &alist); 1231 } 1232 if (slist.nr) 1233 mptcp_pm_rm_subflow(msk, &slist); 1234 /* Reset counters: maybe some subflows have been removed before */ 1235 bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1236 msk->pm.local_addr_used = 0; 1237 spin_unlock_bh(&msk->pm.lock); 1238 } 1239 1240 static void mptcp_nl_flush_addrs_list(struct net *net, 1241 struct list_head *rm_list) 1242 { 1243 long s_slot = 0, s_num = 0; 1244 struct mptcp_sock *msk; 1245 1246 if (list_empty(rm_list)) 1247 return; 1248 1249 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1250 struct sock *sk = (struct sock *)msk; 1251 1252 if (!mptcp_pm_is_userspace(msk)) { 1253 lock_sock(sk); 1254 mptcp_pm_flush_addrs_and_subflows(msk, rm_list); 1255 release_sock(sk); 1256 } 1257 1258 sock_put(sk); 1259 cond_resched(); 1260 } 1261 } 1262 1263 /* caller must ensure the RCU grace period is already elapsed */ 1264 static void __flush_addrs(struct list_head *list) 1265 { 1266 while (!list_empty(list)) { 1267 struct mptcp_pm_addr_entry *cur; 1268 1269 cur = list_entry(list->next, 1270 struct mptcp_pm_addr_entry, list); 1271 list_del_rcu(&cur->list); 1272 __mptcp_pm_release_addr_entry(cur); 1273 } 1274 } 1275 1276 static void __reset_counters(struct pm_nl_pernet *pernet) 1277 { 1278 WRITE_ONCE(pernet->endp_signal_max, 0); 1279 WRITE_ONCE(pernet->endp_subflow_max, 0); 1280 WRITE_ONCE(pernet->endp_laminar_max, 0); 1281 pernet->endpoints = 0; 1282 } 1283 1284 int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) 1285 { 1286 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1287 struct list_head free_list; 1288 1289 spin_lock_bh(&pernet->lock); 1290 free_list = pernet->endp_list; 1291 INIT_LIST_HEAD_RCU(&pernet->endp_list); 1292 
__reset_counters(pernet); 1293 pernet->next_id = 1; 1294 bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1295 spin_unlock_bh(&pernet->lock); 1296 1297 if (free_list.next == &pernet->endp_list) 1298 return 0; 1299 1300 synchronize_rcu(); 1301 1302 /* Adjust the pointers to free_list instead of pernet->endp_list */ 1303 free_list.prev->next = &free_list; 1304 free_list.next->prev = &free_list; 1305 1306 mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); 1307 __flush_addrs(&free_list); 1308 return 0; 1309 } 1310 1311 int mptcp_pm_nl_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, 1312 struct genl_info *info) 1313 { 1314 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1315 struct mptcp_pm_addr_entry *entry; 1316 int ret = -EINVAL; 1317 1318 rcu_read_lock(); 1319 entry = __lookup_addr_by_id(pernet, id); 1320 if (entry) { 1321 *addr = *entry; 1322 ret = 0; 1323 } 1324 rcu_read_unlock(); 1325 1326 return ret; 1327 } 1328 1329 int mptcp_pm_nl_dump_addr(struct sk_buff *msg, 1330 struct netlink_callback *cb) 1331 { 1332 struct net *net = sock_net(msg->sk); 1333 struct mptcp_pm_addr_entry *entry; 1334 struct pm_nl_pernet *pernet; 1335 int id = cb->args[0]; 1336 int i; 1337 1338 pernet = pm_nl_get_pernet(net); 1339 1340 rcu_read_lock(); 1341 for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) { 1342 if (test_bit(i, pernet->id_bitmap)) { 1343 entry = __lookup_addr_by_id(pernet, i); 1344 if (!entry) 1345 break; 1346 1347 if (entry->addr.id <= id) 1348 continue; 1349 1350 if (mptcp_pm_genl_fill_addr(msg, cb, entry) < 0) 1351 break; 1352 1353 id = entry->addr.id; 1354 } 1355 } 1356 rcu_read_unlock(); 1357 1358 cb->args[0] = id; 1359 return msg->len; 1360 } 1361 1362 static int parse_limit(struct genl_info *info, int id, unsigned int *limit) 1363 { 1364 struct nlattr *attr = info->attrs[id]; 1365 1366 if (!attr) 1367 return 0; 1368 1369 *limit = nla_get_u32(attr); 1370 if (*limit > MPTCP_PM_ADDR_MAX) { 1371 NL_SET_ERR_MSG_ATTR_FMT(info->extack, attr, 1372 "limit 
greater than maximum (%u)", 1373 MPTCP_PM_ADDR_MAX); 1374 return -EINVAL; 1375 } 1376 return 0; 1377 } 1378 1379 int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info) 1380 { 1381 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1382 unsigned int rcv_addrs, subflows; 1383 int ret; 1384 1385 spin_lock_bh(&pernet->lock); 1386 rcv_addrs = pernet->limit_add_addr_accepted; 1387 ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs); 1388 if (ret) 1389 goto unlock; 1390 1391 subflows = pernet->limit_extra_subflows; 1392 ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows); 1393 if (ret) 1394 goto unlock; 1395 1396 WRITE_ONCE(pernet->limit_add_addr_accepted, rcv_addrs); 1397 WRITE_ONCE(pernet->limit_extra_subflows, subflows); 1398 1399 unlock: 1400 spin_unlock_bh(&pernet->lock); 1401 return ret; 1402 } 1403 1404 int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info) 1405 { 1406 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1407 struct sk_buff *msg; 1408 void *reply; 1409 1410 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1411 if (!msg) 1412 return -ENOMEM; 1413 1414 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, 1415 MPTCP_PM_CMD_GET_LIMITS); 1416 if (!reply) 1417 goto fail; 1418 1419 if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS, 1420 READ_ONCE(pernet->limit_add_addr_accepted))) 1421 goto fail; 1422 1423 if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS, 1424 READ_ONCE(pernet->limit_extra_subflows))) 1425 goto fail; 1426 1427 genlmsg_end(msg, reply); 1428 return genlmsg_reply(msg, info); 1429 1430 fail: 1431 GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); 1432 nlmsg_free(msg); 1433 return -EMSGSIZE; 1434 } 1435 1436 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, 1437 struct mptcp_addr_info *addr) 1438 { 1439 struct mptcp_rm_list list = { .nr = 0 }; 1440 1441 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1442 1443 spin_lock_bh(&msk->pm.lock); 1444 
mptcp_pm_rm_subflow(msk, &list); 1445 __mark_subflow_endp_available(msk, list.ids[0]); 1446 mptcp_pm_create_subflow_or_signal_addr(msk); 1447 spin_unlock_bh(&msk->pm.lock); 1448 } 1449 1450 static void mptcp_pm_nl_set_flags_all(struct net *net, 1451 struct mptcp_pm_addr_entry *local, 1452 u8 changed) 1453 { 1454 u8 is_subflow = !!(local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW); 1455 u8 bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 1456 long s_slot = 0, s_num = 0; 1457 struct mptcp_sock *msk; 1458 1459 if (changed == MPTCP_PM_ADDR_FLAG_FULLMESH && !is_subflow) 1460 return; 1461 1462 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1463 struct sock *sk = (struct sock *)msk; 1464 1465 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) 1466 goto next; 1467 1468 lock_sock(sk); 1469 if (changed & MPTCP_PM_ADDR_FLAG_BACKUP) 1470 mptcp_pm_mp_prio_send_ack(msk, &local->addr, NULL, bkup); 1471 /* Subflows will only be recreated if the SUBFLOW flag is set */ 1472 if (is_subflow && (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)) 1473 mptcp_pm_nl_fullmesh(msk, &local->addr); 1474 release_sock(sk); 1475 1476 next: 1477 sock_put(sk); 1478 cond_resched(); 1479 } 1480 } 1481 1482 int mptcp_pm_nl_set_flags(struct mptcp_pm_addr_entry *local, 1483 struct genl_info *info) 1484 { 1485 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; 1486 u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | 1487 MPTCP_PM_ADDR_FLAG_FULLMESH; 1488 struct net *net = genl_info_net(info); 1489 struct mptcp_pm_addr_entry *entry; 1490 struct pm_nl_pernet *pernet; 1491 u8 lookup_by_id = 0; 1492 1493 pernet = pm_nl_get_pernet(net); 1494 1495 if (local->addr.family == AF_UNSPEC) { 1496 lookup_by_id = 1; 1497 if (!local->addr.id) { 1498 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1499 "missing address ID"); 1500 return -EOPNOTSUPP; 1501 } 1502 } 1503 1504 spin_lock_bh(&pernet->lock); 1505 entry = lookup_by_id ? 
__lookup_addr_by_id(pernet, local->addr.id) : 1506 __lookup_addr(pernet, &local->addr); 1507 if (!entry) { 1508 spin_unlock_bh(&pernet->lock); 1509 NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found"); 1510 return -EINVAL; 1511 } 1512 if ((local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && 1513 (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL | 1514 MPTCP_PM_ADDR_FLAG_IMPLICIT))) { 1515 spin_unlock_bh(&pernet->lock); 1516 NL_SET_ERR_MSG_ATTR(info->extack, attr, "invalid addr flags"); 1517 return -EINVAL; 1518 } 1519 1520 changed = (local->flags ^ entry->flags) & mask; 1521 entry->flags = (entry->flags & ~mask) | (local->flags & mask); 1522 *local = *entry; 1523 1524 if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) { 1525 u8 addr_max = pernet->endp_fullmesh_max; 1526 1527 if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) 1528 addr_max++; 1529 else 1530 addr_max--; 1531 1532 WRITE_ONCE(pernet->endp_fullmesh_max, addr_max); 1533 } 1534 1535 spin_unlock_bh(&pernet->lock); 1536 1537 mptcp_pm_nl_set_flags_all(net, local, changed); 1538 return 0; 1539 } 1540 1541 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk) 1542 { 1543 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 1544 1545 if (msk->pm.extra_subflows == mptcp_pm_get_limit_extra_subflows(msk) || 1546 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap, 1547 MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) { 1548 WRITE_ONCE(msk->pm.work_pending, false); 1549 return false; 1550 } 1551 return true; 1552 } 1553 1554 /* Called under PM lock */ 1555 void __mptcp_pm_kernel_worker(struct mptcp_sock *msk) 1556 { 1557 struct mptcp_pm_data *pm = &msk->pm; 1558 1559 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { 1560 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); 1561 mptcp_pm_nl_add_addr_received(msk); 1562 } 1563 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { 1564 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); 1565 mptcp_pm_nl_fully_established(msk); 1566 } 1567 if (pm->status & 
BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { 1568 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); 1569 mptcp_pm_nl_subflow_established(msk); 1570 } 1571 } 1572 1573 static int __net_init pm_nl_init_net(struct net *net) 1574 { 1575 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); 1576 1577 INIT_LIST_HEAD_RCU(&pernet->endp_list); 1578 1579 /* Cit. 2 subflows ought to be enough for anybody. */ 1580 pernet->limit_extra_subflows = 2; 1581 pernet->next_id = 1; 1582 spin_lock_init(&pernet->lock); 1583 1584 /* No need to initialize other pernet fields, the struct is zeroed at 1585 * allocation time. 1586 */ 1587 1588 return 0; 1589 } 1590 1591 static void __net_exit pm_nl_exit_net(struct list_head *net_list) 1592 { 1593 struct net *net; 1594 1595 list_for_each_entry(net, net_list, exit_list) { 1596 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); 1597 1598 /* net is removed from namespace list, can't race with 1599 * other modifiers, also netns core already waited for a 1600 * RCU grace period. 1601 */ 1602 __flush_addrs(&pernet->endp_list); 1603 } 1604 } 1605 1606 static struct pernet_operations mptcp_pm_pernet_ops = { 1607 .init = pm_nl_init_net, 1608 .exit_batch = pm_nl_exit_net, 1609 .id = &pm_nl_pernet_id, 1610 .size = sizeof(struct pm_nl_pernet), 1611 }; 1612 1613 struct mptcp_pm_ops mptcp_pm_kernel = { 1614 .name = "kernel", 1615 .owner = THIS_MODULE, 1616 }; 1617 1618 void __init mptcp_pm_kernel_register(void) 1619 { 1620 if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0) 1621 panic("Failed to register MPTCP PM pernet subsystem.\n"); 1622 1623 mptcp_pm_register(&mptcp_pm_kernel); 1624 } 1625