1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2025, Matthieu Baerts. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <net/netns/generic.h> 10 11 #include "protocol.h" 12 #include "mib.h" 13 #include "mptcp_pm_gen.h" 14 15 static int pm_nl_pernet_id; 16 17 struct pm_nl_pernet { 18 /* protects pernet updates */ 19 spinlock_t lock; 20 struct list_head endp_list; 21 u8 endpoints; 22 u8 endp_signal_max; 23 u8 endp_subflow_max; 24 u8 endp_laminar_max; 25 u8 endp_fullmesh_max; 26 u8 limit_add_addr_accepted; 27 u8 limit_extra_subflows; 28 u8 next_id; 29 DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 30 }; 31 32 #define MPTCP_PM_ADDR_MAX 8 33 34 static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net) 35 { 36 return net_generic(net, pm_nl_pernet_id); 37 } 38 39 static struct pm_nl_pernet * 40 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk) 41 { 42 return pm_nl_get_pernet(sock_net((struct sock *)msk)); 43 } 44 45 static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) 46 { 47 return pm_nl_get_pernet(genl_info_net(info)); 48 } 49 50 u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk) 51 { 52 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 53 54 return READ_ONCE(pernet->endp_signal_max); 55 } 56 EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_signal_max); 57 58 u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk) 59 { 60 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 61 62 return READ_ONCE(pernet->endp_subflow_max); 63 } 64 EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_subflow_max); 65 66 u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk) 67 { 68 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 69 70 return READ_ONCE(pernet->endp_laminar_max); 71 } 72 EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_laminar_max); 73 74 u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk) 75 { 76 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 77 78 
return READ_ONCE(pernet->endp_fullmesh_max); 79 } 80 EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_fullmesh_max); 81 82 u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk) 83 { 84 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 85 86 return READ_ONCE(pernet->limit_add_addr_accepted); 87 } 88 EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_add_addr_accepted); 89 90 u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk) 91 { 92 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 93 94 return READ_ONCE(pernet->limit_extra_subflows); 95 } 96 EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_extra_subflows); 97 98 static bool lookup_subflow_by_daddr(const struct list_head *list, 99 const struct mptcp_addr_info *daddr) 100 { 101 struct mptcp_subflow_context *subflow; 102 struct mptcp_addr_info cur; 103 104 list_for_each_entry(subflow, list, node) { 105 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 106 107 if (!((1 << inet_sk_state_load(ssk)) & 108 (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) 109 continue; 110 111 mptcp_remote_address((struct sock_common *)ssk, &cur); 112 if (mptcp_addresses_equal(&cur, daddr, daddr->port)) 113 return true; 114 } 115 116 return false; 117 } 118 119 static bool 120 select_local_address(const struct pm_nl_pernet *pernet, 121 const struct mptcp_sock *msk, 122 struct mptcp_pm_local *new_local) 123 { 124 struct mptcp_pm_addr_entry *entry; 125 bool found = false; 126 127 msk_owned_by_me(msk); 128 129 rcu_read_lock(); 130 list_for_each_entry_rcu(entry, &pernet->endp_list, list) { 131 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) 132 continue; 133 134 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) 135 continue; 136 137 new_local->addr = entry->addr; 138 new_local->flags = entry->flags; 139 new_local->ifindex = entry->ifindex; 140 found = true; 141 break; 142 } 143 rcu_read_unlock(); 144 145 return found; 146 } 147 148 static bool 149 select_signal_address(struct pm_nl_pernet *pernet, const struct 
mptcp_sock *msk, 150 struct mptcp_pm_local *new_local) 151 { 152 struct mptcp_pm_addr_entry *entry; 153 bool found = false; 154 155 rcu_read_lock(); 156 /* do not keep any additional per socket state, just signal 157 * the address list in order. 158 * Note: removal from the local address list during the msk life-cycle 159 * can lead to additional addresses not being announced. 160 */ 161 list_for_each_entry_rcu(entry, &pernet->endp_list, list) { 162 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) 163 continue; 164 165 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) 166 continue; 167 168 new_local->addr = entry->addr; 169 new_local->flags = entry->flags; 170 new_local->ifindex = entry->ifindex; 171 found = true; 172 break; 173 } 174 rcu_read_unlock(); 175 176 return found; 177 } 178 179 static unsigned int 180 fill_remote_addr(struct mptcp_sock *msk, struct mptcp_addr_info *local, 181 struct mptcp_addr_info *addrs) 182 { 183 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); 184 struct mptcp_addr_info remote = { 0 }; 185 struct sock *sk = (struct sock *)msk; 186 187 if (deny_id0) 188 return 0; 189 190 mptcp_remote_address((struct sock_common *)sk, &remote); 191 192 if (!mptcp_pm_addr_families_match(sk, local, &remote)) 193 return 0; 194 195 msk->pm.extra_subflows++; 196 *addrs = remote; 197 198 return 1; 199 } 200 201 static unsigned int 202 fill_remote_addresses_fullmesh(struct mptcp_sock *msk, 203 struct mptcp_addr_info *local, 204 struct mptcp_addr_info *addrs) 205 { 206 u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); 207 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); 208 DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 209 struct sock *sk = (struct sock *)msk, *ssk; 210 struct mptcp_subflow_context *subflow; 211 int i = 0; 212 213 /* Forbid creation of new subflows matching existing ones, possibly 214 * already created by incoming ADD_ADDR 215 */ 216 bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 217 
mptcp_for_each_subflow(msk, subflow) 218 if (READ_ONCE(subflow->local_id) == local->id) 219 __set_bit(subflow->remote_id, unavail_id); 220 221 mptcp_for_each_subflow(msk, subflow) { 222 ssk = mptcp_subflow_tcp_sock(subflow); 223 mptcp_remote_address((struct sock_common *)ssk, &addrs[i]); 224 addrs[i].id = READ_ONCE(subflow->remote_id); 225 if (deny_id0 && !addrs[i].id) 226 continue; 227 228 if (test_bit(addrs[i].id, unavail_id)) 229 continue; 230 231 if (!mptcp_pm_addr_families_match(sk, local, &addrs[i])) 232 continue; 233 234 /* forbid creating multiple address towards this id */ 235 __set_bit(addrs[i].id, unavail_id); 236 msk->pm.extra_subflows++; 237 i++; 238 239 if (msk->pm.extra_subflows >= limit_extra_subflows) 240 break; 241 } 242 243 return i; 244 } 245 246 /* Fill all the remote addresses into the array addrs[], 247 * and return the array size. 248 */ 249 static unsigned int 250 fill_remote_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *local, 251 bool fullmesh, struct mptcp_addr_info *addrs) 252 { 253 /* Non-fullmesh: fill in the single entry corresponding to the primary 254 * MPC subflow remote address, and return 1, corresponding to 1 entry. 
255 */ 256 if (!fullmesh) 257 return fill_remote_addr(msk, local, addrs); 258 259 /* Fullmesh endpoint: fill all possible remote addresses */ 260 return fill_remote_addresses_fullmesh(msk, local, addrs); 261 } 262 263 static struct mptcp_pm_addr_entry * 264 __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) 265 { 266 struct mptcp_pm_addr_entry *entry; 267 268 list_for_each_entry_rcu(entry, &pernet->endp_list, list, 269 lockdep_is_held(&pernet->lock)) { 270 if (entry->addr.id == id) 271 return entry; 272 } 273 return NULL; 274 } 275 276 static struct mptcp_pm_addr_entry * 277 __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) 278 { 279 struct mptcp_pm_addr_entry *entry; 280 281 list_for_each_entry_rcu(entry, &pernet->endp_list, list, 282 lockdep_is_held(&pernet->lock)) { 283 if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) 284 return entry; 285 } 286 return NULL; 287 } 288 289 static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, 290 const struct mptcp_addr_info *addr) 291 { 292 return msk->mpc_endpoint_id == addr->id ? 
0 : addr->id; 293 } 294 295 /* Set mpc_endpoint_id, and send MP_PRIO for ID0 if needed */ 296 static void mptcp_mpc_endpoint_setup(struct mptcp_sock *msk) 297 { 298 struct mptcp_subflow_context *subflow; 299 struct mptcp_pm_addr_entry *entry; 300 struct mptcp_addr_info mpc_addr; 301 struct pm_nl_pernet *pernet; 302 bool backup = false; 303 304 /* do lazy endpoint usage accounting for the MPC subflows */ 305 if (likely(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED)) || 306 !msk->first) 307 return; 308 309 subflow = mptcp_subflow_ctx(msk->first); 310 pernet = pm_nl_get_pernet_from_msk(msk); 311 312 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); 313 rcu_read_lock(); 314 entry = __lookup_addr(pernet, &mpc_addr); 315 if (entry) { 316 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); 317 msk->mpc_endpoint_id = entry->addr.id; 318 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 319 } 320 rcu_read_unlock(); 321 322 /* Send MP_PRIO */ 323 if (backup) 324 mptcp_pm_send_ack(msk, subflow, true, backup); 325 326 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); 327 } 328 329 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) 330 { 331 u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); 332 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 333 u8 endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk); 334 u8 endp_signal_max = mptcp_pm_get_endp_signal_max(msk); 335 struct sock *sk = (struct sock *)msk; 336 bool signal_and_subflow = false; 337 struct mptcp_pm_local local; 338 339 mptcp_mpc_endpoint_setup(msk); 340 if (!mptcp_is_fully_established(sk)) 341 return; 342 343 pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", 344 msk->pm.local_addr_used, endp_subflow_max, 345 msk->pm.add_addr_signaled, endp_signal_max, 346 msk->pm.extra_subflows, limit_extra_subflows); 347 348 /* check first for announce */ 349 if (msk->pm.add_addr_signaled < endp_signal_max) { 350 /* due to racing events on 
both ends we can reach here while 351 * previous add address is still running: if we invoke now 352 * mptcp_pm_announce_addr(), that will fail and the 353 * corresponding id will be marked as used. 354 * Instead let the PM machinery reschedule us when the 355 * current address announce will be completed. 356 */ 357 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) 358 return; 359 360 if (!select_signal_address(pernet, msk, &local)) 361 goto subflow; 362 363 /* If the alloc fails, we are on memory pressure, not worth 364 * continuing, and trying to create subflows. 365 */ 366 if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) 367 return; 368 369 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 370 msk->pm.add_addr_signaled++; 371 372 /* Special case for ID0: set the correct ID */ 373 if (local.addr.id == msk->mpc_endpoint_id) 374 local.addr.id = 0; 375 376 mptcp_pm_announce_addr(msk, &local.addr, false); 377 mptcp_pm_addr_send_ack(msk); 378 379 if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) 380 signal_and_subflow = true; 381 } 382 383 subflow: 384 /* No need to try establishing subflows to remote id0 if not allowed */ 385 if (mptcp_pm_add_addr_c_flag_case(msk)) 386 goto exit; 387 388 /* check if should create a new subflow */ 389 while (msk->pm.local_addr_used < endp_subflow_max && 390 msk->pm.extra_subflows < limit_extra_subflows) { 391 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; 392 bool fullmesh; 393 int i, nr; 394 395 if (signal_and_subflow) 396 signal_and_subflow = false; 397 else if (!select_local_address(pernet, msk, &local)) 398 break; 399 400 fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); 401 402 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 403 404 /* Special case for ID0: set the correct ID */ 405 if (local.addr.id == msk->mpc_endpoint_id) 406 local.addr.id = 0; 407 else /* local_addr_used is not decr for ID 0 */ 408 msk->pm.local_addr_used++; 409 410 nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); 411 if (nr 
== 0) 412 continue; 413 414 spin_unlock_bh(&msk->pm.lock); 415 for (i = 0; i < nr; i++) 416 __mptcp_subflow_connect(sk, &local, &addrs[i]); 417 spin_lock_bh(&msk->pm.lock); 418 } 419 420 exit: 421 /* If an endpoint has both the signal and subflow flags, but it is not 422 * possible to create subflows -- the 'while' loop body above never 423 * executed -- then still mark the endp as used, which is somehow the 424 * case. This avoids issues later when removing the endpoint and calling 425 * __mark_subflow_endp_available(), which expects the increment here. 426 */ 427 if (signal_and_subflow && local.addr.id != msk->mpc_endpoint_id) 428 msk->pm.local_addr_used++; 429 430 mptcp_pm_nl_check_work_pending(msk); 431 } 432 433 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk) 434 { 435 mptcp_pm_create_subflow_or_signal_addr(msk); 436 } 437 438 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) 439 { 440 mptcp_pm_create_subflow_or_signal_addr(msk); 441 } 442 443 static unsigned int 444 fill_local_addresses_vec_fullmesh(struct mptcp_sock *msk, 445 struct mptcp_addr_info *remote, 446 struct mptcp_pm_local *locals, 447 bool c_flag_case) 448 { 449 u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); 450 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 451 struct sock *sk = (struct sock *)msk; 452 struct mptcp_pm_addr_entry *entry; 453 struct mptcp_pm_local *local; 454 int i = 0; 455 456 rcu_read_lock(); 457 list_for_each_entry_rcu(entry, &pernet->endp_list, list) { 458 bool is_id0; 459 460 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) 461 continue; 462 463 if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) 464 continue; 465 466 local = &locals[i]; 467 local->addr = entry->addr; 468 local->flags = entry->flags; 469 local->ifindex = entry->ifindex; 470 471 is_id0 = local->addr.id == msk->mpc_endpoint_id; 472 473 if (c_flag_case && 474 (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) { 475 
__clear_bit(local->addr.id, msk->pm.id_avail_bitmap); 476 477 if (!is_id0) 478 msk->pm.local_addr_used++; 479 } 480 481 /* Special case for ID0: set the correct ID */ 482 if (is_id0) 483 local->addr.id = 0; 484 485 msk->pm.extra_subflows++; 486 i++; 487 488 if (msk->pm.extra_subflows >= limit_extra_subflows) 489 break; 490 } 491 rcu_read_unlock(); 492 493 return i; 494 } 495 496 static unsigned int 497 fill_local_laminar_endp(struct mptcp_sock *msk, struct mptcp_addr_info *remote, 498 struct mptcp_pm_local *locals) 499 { 500 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 501 DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 502 struct mptcp_subflow_context *subflow; 503 struct sock *sk = (struct sock *)msk; 504 struct mptcp_pm_addr_entry *entry; 505 struct mptcp_pm_local *local; 506 int found = 0; 507 508 /* Forbid creation of new subflows matching existing ones, possibly 509 * already created by 'subflow' endpoints 510 */ 511 bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); 512 mptcp_for_each_subflow(msk, subflow) { 513 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 514 515 if ((1 << inet_sk_state_load(ssk)) & 516 (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | 517 TCPF_CLOSE)) 518 continue; 519 520 __set_bit(subflow_get_local_id(subflow), unavail_id); 521 } 522 523 rcu_read_lock(); 524 list_for_each_entry_rcu(entry, &pernet->endp_list, list) { 525 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR)) 526 continue; 527 528 if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) 529 continue; 530 531 if (test_bit(mptcp_endp_get_local_id(msk, &entry->addr), 532 unavail_id)) 533 continue; 534 535 local = &locals[0]; 536 local->addr = entry->addr; 537 local->flags = entry->flags; 538 local->ifindex = entry->ifindex; 539 540 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 541 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); 542 543 if (local->addr.id != msk->mpc_endpoint_id) 544 msk->pm.local_addr_used++; 545 } 546 547 
msk->pm.extra_subflows++; 548 found = 1; 549 break; 550 } 551 rcu_read_unlock(); 552 553 return found; 554 } 555 556 static unsigned int 557 fill_local_addresses_vec_c_flag(struct mptcp_sock *msk, 558 struct mptcp_addr_info *remote, 559 struct mptcp_pm_local *locals) 560 { 561 u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); 562 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 563 u8 endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk); 564 struct sock *sk = (struct sock *)msk; 565 struct mptcp_pm_local *local; 566 int i = 0; 567 568 while (msk->pm.local_addr_used < endp_subflow_max) { 569 local = &locals[i]; 570 571 if (!select_local_address(pernet, msk, local)) 572 break; 573 574 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); 575 576 if (!mptcp_pm_addr_families_match(sk, &local->addr, remote)) 577 continue; 578 579 if (local->addr.id == msk->mpc_endpoint_id) 580 continue; 581 582 msk->pm.local_addr_used++; 583 msk->pm.extra_subflows++; 584 i++; 585 586 if (msk->pm.extra_subflows >= limit_extra_subflows) 587 break; 588 } 589 590 return i; 591 } 592 593 static unsigned int 594 fill_local_address_any(struct mptcp_sock *msk, struct mptcp_addr_info *remote, 595 struct mptcp_pm_local *local) 596 { 597 struct sock *sk = (struct sock *)msk; 598 599 memset(local, 0, sizeof(*local)); 600 local->addr.family = 601 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 602 remote->family == AF_INET6 && 603 ipv6_addr_v4mapped(&remote->addr6) ? AF_INET : 604 #endif 605 remote->family; 606 607 if (!mptcp_pm_addr_families_match(sk, &local->addr, remote)) 608 return 0; 609 610 msk->pm.extra_subflows++; 611 612 return 1; 613 } 614 615 /* Fill all the local addresses into the array addrs[], 616 * and return the array size. 
617 */ 618 static unsigned int 619 fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *remote, 620 struct mptcp_pm_local *locals) 621 { 622 bool c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk); 623 624 /* If there is at least one MPTCP endpoint with a fullmesh flag */ 625 if (mptcp_pm_get_endp_fullmesh_max(msk)) 626 return fill_local_addresses_vec_fullmesh(msk, remote, locals, 627 c_flag_case); 628 629 /* If there is at least one MPTCP endpoint with a laminar flag */ 630 if (mptcp_pm_get_endp_laminar_max(msk)) 631 return fill_local_laminar_endp(msk, remote, locals); 632 633 /* Special case: peer sets the C flag, accept one ADD_ADDR if default 634 * limits are used -- accepting no ADD_ADDR -- and use subflow endpoints 635 */ 636 if (c_flag_case) 637 return fill_local_addresses_vec_c_flag(msk, remote, locals); 638 639 /* No special case: fill in the single 'IPADDRANY' local address */ 640 return fill_local_address_any(msk, remote, &locals[0]); 641 } 642 643 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) 644 { 645 u8 limit_add_addr_accepted = mptcp_pm_get_limit_add_addr_accepted(msk); 646 u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); 647 struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX]; 648 struct sock *sk = (struct sock *)msk; 649 struct mptcp_addr_info remote; 650 bool sf_created = false; 651 int i, nr; 652 653 pr_debug("accepted %d:%d remote family %d\n", 654 msk->pm.add_addr_accepted, limit_add_addr_accepted, 655 msk->pm.remote.family); 656 657 remote = msk->pm.remote; 658 mptcp_pm_announce_addr(msk, &remote, true); 659 mptcp_pm_addr_send_ack(msk); 660 mptcp_mpc_endpoint_setup(msk); 661 662 if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) 663 return; 664 665 /* pick id 0 port, if none is provided the remote address */ 666 if (!remote.port) 667 remote.port = sk->sk_dport; 668 669 /* connect to the specified remote address, using whatever 670 * local address the routing configuration 
will pick. 671 */ 672 nr = fill_local_addresses_vec(msk, &remote, locals); 673 if (nr == 0) 674 return; 675 676 spin_unlock_bh(&msk->pm.lock); 677 for (i = 0; i < nr; i++) 678 if (__mptcp_subflow_connect(sk, &locals[i], &remote) == 0) 679 sf_created = true; 680 spin_lock_bh(&msk->pm.lock); 681 682 if (sf_created) { 683 /* add_addr_accepted is not decr for ID 0 */ 684 if (remote.id) 685 msk->pm.add_addr_accepted++; 686 if (msk->pm.add_addr_accepted >= limit_add_addr_accepted || 687 msk->pm.extra_subflows >= limit_extra_subflows) 688 WRITE_ONCE(msk->pm.accept_addr, false); 689 } 690 } 691 692 void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id) 693 { 694 if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { 695 u8 limit_add_addr_accepted = 696 mptcp_pm_get_limit_add_addr_accepted(msk); 697 698 /* Note: if the subflow has been closed before, this 699 * add_addr_accepted counter will not be decremented. 700 */ 701 if (--msk->pm.add_addr_accepted < limit_add_addr_accepted) 702 WRITE_ONCE(msk->pm.accept_addr, true); 703 } 704 } 705 706 static bool address_use_port(struct mptcp_pm_addr_entry *entry) 707 { 708 return (entry->flags & 709 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == 710 MPTCP_PM_ADDR_FLAG_SIGNAL; 711 } 712 713 /* caller must ensure the RCU grace period is already elapsed */ 714 static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) 715 { 716 if (entry->lsk) 717 sock_release(entry->lsk); 718 kfree(entry); 719 } 720 721 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, 722 struct mptcp_pm_addr_entry *entry, 723 bool needs_id, bool replace) 724 { 725 struct mptcp_pm_addr_entry *cur, *del_entry = NULL; 726 int ret = -EINVAL; 727 u8 addr_max; 728 729 spin_lock_bh(&pernet->lock); 730 /* to keep the code simple, don't do IDR-like allocation for address ID, 731 * just bail when we exceed limits 732 */ 733 if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID) 734 pernet->next_id = 1; 735 if 
(pernet->endpoints >= MPTCP_PM_ADDR_MAX) { 736 ret = -ERANGE; 737 goto out; 738 } 739 if (test_bit(entry->addr.id, pernet->id_bitmap)) { 740 ret = -EBUSY; 741 goto out; 742 } 743 744 /* do not insert duplicate address, differentiate on port only 745 * singled addresses 746 */ 747 if (!address_use_port(entry)) 748 entry->addr.port = 0; 749 list_for_each_entry(cur, &pernet->endp_list, list) { 750 if (mptcp_addresses_equal(&cur->addr, &entry->addr, 751 cur->addr.port || entry->addr.port)) { 752 /* allow replacing the exiting endpoint only if such 753 * endpoint is an implicit one and the user-space 754 * did not provide an endpoint id 755 */ 756 if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) { 757 ret = -EEXIST; 758 goto out; 759 } 760 if (entry->addr.id) 761 goto out; 762 763 /* allow callers that only need to look up the local 764 * addr's id to skip replacement. This allows them to 765 * avoid calling synchronize_rcu in the packet recv 766 * path. 767 */ 768 if (!replace) { 769 kfree(entry); 770 ret = cur->addr.id; 771 goto out; 772 } 773 774 pernet->endpoints--; 775 entry->addr.id = cur->addr.id; 776 list_del_rcu(&cur->list); 777 del_entry = cur; 778 break; 779 } 780 } 781 782 if (!entry->addr.id && needs_id) { 783 find_next: 784 entry->addr.id = find_next_zero_bit(pernet->id_bitmap, 785 MPTCP_PM_MAX_ADDR_ID + 1, 786 pernet->next_id); 787 if (!entry->addr.id && pernet->next_id != 1) { 788 pernet->next_id = 1; 789 goto find_next; 790 } 791 } 792 793 if (!entry->addr.id && needs_id) 794 goto out; 795 796 __set_bit(entry->addr.id, pernet->id_bitmap); 797 if (entry->addr.id > pernet->next_id) 798 pernet->next_id = entry->addr.id; 799 800 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { 801 addr_max = pernet->endp_signal_max; 802 WRITE_ONCE(pernet->endp_signal_max, addr_max + 1); 803 } 804 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { 805 addr_max = pernet->endp_subflow_max; 806 WRITE_ONCE(pernet->endp_subflow_max, addr_max + 1); 807 } 808 if (entry->flags & 
MPTCP_PM_ADDR_FLAG_LAMINAR) { 809 addr_max = pernet->endp_laminar_max; 810 WRITE_ONCE(pernet->endp_laminar_max, addr_max + 1); 811 } 812 if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { 813 addr_max = pernet->endp_fullmesh_max; 814 WRITE_ONCE(pernet->endp_fullmesh_max, addr_max + 1); 815 } 816 817 pernet->endpoints++; 818 if (!entry->addr.port) 819 list_add_tail_rcu(&entry->list, &pernet->endp_list); 820 else 821 list_add_rcu(&entry->list, &pernet->endp_list); 822 ret = entry->addr.id; 823 824 out: 825 spin_unlock_bh(&pernet->lock); 826 827 /* just replaced an existing entry, free it */ 828 if (del_entry) { 829 synchronize_rcu(); 830 __mptcp_pm_release_addr_entry(del_entry); 831 } 832 return ret; 833 } 834 835 static struct lock_class_key mptcp_slock_keys[2]; 836 static struct lock_class_key mptcp_keys[2]; 837 838 static int mptcp_pm_nl_create_listen_socket(struct sock *sk, 839 struct mptcp_pm_addr_entry *entry) 840 { 841 bool is_ipv6 = sk->sk_family == AF_INET6; 842 int addrlen = sizeof(struct sockaddr_in); 843 struct sockaddr_storage addr; 844 struct sock *newsk, *ssk; 845 int backlog = 1024; 846 int err; 847 848 err = sock_create_kern(sock_net(sk), entry->addr.family, 849 SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk); 850 if (err) 851 return err; 852 853 newsk = entry->lsk->sk; 854 if (!newsk) 855 return -EINVAL; 856 857 /* The subflow socket lock is acquired in a nested to the msk one 858 * in several places, even by the TCP stack, and this msk is a kernel 859 * socket: lockdep complains. Instead of propagating the _nested 860 * modifiers in several places, re-init the lock class for the msk 861 * socket to an mptcp specific one. 862 */ 863 sock_lock_init_class_and_name(newsk, 864 is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", 865 &mptcp_slock_keys[is_ipv6], 866 is_ipv6 ? 
"msk_lock-AF_INET6" : "msk_lock-AF_INET", 867 &mptcp_keys[is_ipv6]); 868 869 lock_sock(newsk); 870 ssk = __mptcp_nmpc_sk(mptcp_sk(newsk)); 871 release_sock(newsk); 872 if (IS_ERR(ssk)) 873 return PTR_ERR(ssk); 874 875 mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family); 876 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 877 if (entry->addr.family == AF_INET6) 878 addrlen = sizeof(struct sockaddr_in6); 879 #endif 880 if (ssk->sk_family == AF_INET) 881 err = inet_bind_sk(ssk, (struct sockaddr_unsized *)&addr, addrlen); 882 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 883 else if (ssk->sk_family == AF_INET6) 884 err = inet6_bind_sk(ssk, (struct sockaddr_unsized *)&addr, addrlen); 885 #endif 886 if (err) 887 return err; 888 889 /* We don't use mptcp_set_state() here because it needs to be called 890 * under the msk socket lock. For the moment, that will not bring 891 * anything more than only calling inet_sk_state_store(), because the 892 * old status is known (TCP_CLOSE). 893 */ 894 inet_sk_state_store(newsk, TCP_LISTEN); 895 lock_sock(ssk); 896 WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true); 897 err = __inet_listen_sk(ssk, backlog); 898 if (!err) 899 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); 900 release_sock(ssk); 901 return err; 902 } 903 904 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, 905 struct mptcp_pm_addr_entry *skc) 906 { 907 struct mptcp_pm_addr_entry *entry; 908 struct pm_nl_pernet *pernet; 909 int ret; 910 911 pernet = pm_nl_get_pernet_from_msk(msk); 912 913 rcu_read_lock(); 914 entry = __lookup_addr(pernet, &skc->addr); 915 ret = entry ? 
entry->addr.id : -1; 916 rcu_read_unlock(); 917 if (ret >= 0) 918 return ret; 919 920 /* address not found, add to local list */ 921 entry = kmemdup(skc, sizeof(*skc), GFP_ATOMIC); 922 if (!entry) 923 return -ENOMEM; 924 925 entry->addr.port = 0; 926 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false); 927 if (ret < 0) 928 kfree(entry); 929 930 return ret; 931 } 932 933 bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) 934 { 935 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); 936 struct mptcp_pm_addr_entry *entry; 937 bool backup; 938 939 rcu_read_lock(); 940 entry = __lookup_addr(pernet, skc); 941 backup = entry && !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); 942 rcu_read_unlock(); 943 944 return backup; 945 } 946 947 static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, 948 struct mptcp_addr_info *addr) 949 { 950 struct mptcp_sock *msk; 951 long s_slot = 0, s_num = 0; 952 953 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 954 struct sock *sk = (struct sock *)msk; 955 struct mptcp_addr_info mpc_addr; 956 957 if (!READ_ONCE(msk->fully_established) || 958 mptcp_pm_is_userspace(msk)) 959 goto next; 960 961 /* if the endp linked to the init sf is re-added with a != ID */ 962 mptcp_local_address((struct sock_common *)msk, &mpc_addr); 963 964 lock_sock(sk); 965 spin_lock_bh(&msk->pm.lock); 966 if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) 967 msk->mpc_endpoint_id = addr->id; 968 mptcp_pm_create_subflow_or_signal_addr(msk); 969 spin_unlock_bh(&msk->pm.lock); 970 release_sock(sk); 971 972 next: 973 sock_put(sk); 974 cond_resched(); 975 } 976 977 return 0; 978 } 979 980 static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, 981 struct genl_info *info) 982 { 983 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 984 985 if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, 986 mptcp_pm_address_nl_policy, info->extack) && 987 tb[MPTCP_PM_ADDR_ATTR_ID]) 
988 return true; 989 return false; 990 } 991 992 /* Add an MPTCP endpoint */ 993 int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) 994 { 995 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 996 struct mptcp_pm_addr_entry addr, *entry; 997 struct nlattr *attr; 998 int ret; 999 1000 if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR)) 1001 return -EINVAL; 1002 1003 attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; 1004 ret = mptcp_pm_parse_entry(attr, info, true, &addr); 1005 if (ret < 0) 1006 return ret; 1007 1008 if (addr.addr.port && !address_use_port(&addr)) { 1009 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1010 "flags must have signal and not subflow when using port"); 1011 return -EINVAL; 1012 } 1013 1014 if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL && 1015 addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { 1016 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1017 "flags mustn't have both signal and fullmesh"); 1018 return -EINVAL; 1019 } 1020 1021 if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { 1022 NL_SET_ERR_MSG_ATTR(info->extack, attr, 1023 "can't create IMPLICIT endpoint"); 1024 return -EINVAL; 1025 } 1026 1027 entry = kmemdup(&addr, sizeof(addr), GFP_KERNEL_ACCOUNT); 1028 if (!entry) { 1029 GENL_SET_ERR_MSG(info, "can't allocate addr"); 1030 return -ENOMEM; 1031 } 1032 1033 if (entry->addr.port) { 1034 ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry); 1035 if (ret) { 1036 GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret); 1037 goto out_free; 1038 } 1039 } 1040 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, 1041 !mptcp_pm_has_addr_attr_id(attr, info), 1042 true); 1043 if (ret < 0) { 1044 GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); 1045 goto out_free; 1046 } 1047 1048 mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); 1049 return 0; 1050 1051 out_free: 1052 __mptcp_pm_release_addr_entry(entry); 1053 return ret; 1054 } 1055 1056 static void mptcp_pm_remove_anno_addr(struct 
mptcp_sock *msk, 1057 const struct mptcp_addr_info *addr, 1058 bool force) 1059 { 1060 struct mptcp_rm_list list = { .nr = 0 }; 1061 bool announced; 1062 1063 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1064 1065 announced = mptcp_remove_anno_list_by_saddr(msk, addr); 1066 if (announced || force) { 1067 spin_lock_bh(&msk->pm.lock); 1068 if (announced) 1069 msk->pm.add_addr_signaled--; 1070 mptcp_pm_remove_addr(msk, &list); 1071 spin_unlock_bh(&msk->pm.lock); 1072 } 1073 } 1074 1075 static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) 1076 { 1077 /* If it was marked as used, and not ID 0, decrement local_addr_used */ 1078 if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && 1079 id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) 1080 msk->pm.local_addr_used--; 1081 } 1082 1083 static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, 1084 const struct mptcp_pm_addr_entry *entry) 1085 { 1086 const struct mptcp_addr_info *addr = &entry->addr; 1087 struct mptcp_rm_list list = { .nr = 1 }; 1088 long s_slot = 0, s_num = 0; 1089 struct mptcp_sock *msk; 1090 1091 pr_debug("remove_id=%d\n", addr->id); 1092 1093 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1094 struct sock *sk = (struct sock *)msk; 1095 bool remove_subflow; 1096 1097 if (mptcp_pm_is_userspace(msk)) 1098 goto next; 1099 1100 lock_sock(sk); 1101 remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr); 1102 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && 1103 !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); 1104 1105 list.ids[0] = mptcp_endp_get_local_id(msk, addr); 1106 1107 spin_lock_bh(&msk->pm.lock); 1108 if (remove_subflow) 1109 mptcp_pm_rm_subflow(msk, &list); 1110 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) 1111 __mark_subflow_endp_available(msk, list.ids[0]); 1112 else /* mark endp ID as available, e.g. 
			Signal or MPC endp */
			__set_bit(addr->id, msk->pm.id_avail_bitmap);
		spin_unlock_bh(&msk->pm.lock);

		/* The removed endpoint was the MPC one: forget it */
		if (msk->mpc_endpoint_id == entry->addr.id)
			msk->mpc_endpoint_id = 0;
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}

/* Remove the special ID-0 address: walk every in-kernel-PM msk whose local
 * address matches @addr and remove the ID-0 address and subflow from it.
 */
static int mptcp_nl_remove_id_zero_address(struct net *net,
					   struct mptcp_addr_info *addr)
{
	struct mptcp_rm_list list = { .nr = 0 };
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;

	list.ids[list.nr++] = 0;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;
		struct mptcp_addr_info msk_local;

		if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
			goto next;

		/* Only act on connections bound to the address being removed */
		mptcp_local_address((struct sock_common *)msk, &msk_local);
		if (!mptcp_addresses_equal(&msk_local, addr, addr->port))
			goto next;

		lock_sock(sk);
		spin_lock_bh(&msk->pm.lock);
		mptcp_pm_remove_addr(msk, &list);
		mptcp_pm_rm_subflow(msk, &list);
		__mark_subflow_endp_available(msk, 0);
		spin_unlock_bh(&msk->pm.lock);
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}

	return 0;
}

/* Remove an MPTCP endpoint */
int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct mptcp_pm_addr_entry addr, *entry;
	struct nlattr *attr;
	u8 addr_max;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR))
		return -EINVAL;

	attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
	ret = mptcp_pm_parse_entry(attr, info, false, &addr);
	if (ret < 0)
		return ret;

	/* the zero id address is special: the first address used by the msk
	 * always gets such an id, so different subflows can have different zero
	 * id addresses.
	 * Additionally zero id is not accounted for in id_bitmap.
	 * Let's use an 'mptcp_rm_list' instead of the common remove code.
	 */
	if (addr.addr.id == 0)
		return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr);

	spin_lock_bh(&pernet->lock);
	entry = __lookup_addr_by_id(pernet, addr.addr.id);
	if (!entry) {
		NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found");
		spin_unlock_bh(&pernet->lock);
		return -EINVAL;
	}
	/* Keep the per-flag endpoint counters in sync; lockless readers
	 * (mptcp_pm_get_endp_*_max() in this file) pair with these
	 * WRITE_ONCE() stores.
	 */
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
		addr_max = pernet->endp_signal_max;
		WRITE_ONCE(pernet->endp_signal_max, addr_max - 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
		addr_max = pernet->endp_subflow_max;
		WRITE_ONCE(pernet->endp_subflow_max, addr_max - 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR) {
		addr_max = pernet->endp_laminar_max;
		WRITE_ONCE(pernet->endp_laminar_max, addr_max - 1);
	}
	if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
		addr_max = pernet->endp_fullmesh_max;
		WRITE_ONCE(pernet->endp_fullmesh_max, addr_max - 1);
	}

	pernet->endpoints--;
	list_del_rcu(&entry->list);
	__clear_bit(entry->addr.id, pernet->id_bitmap);
	spin_unlock_bh(&pernet->lock);

	mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
	/* Wait for concurrent RCU readers of the endpoint list before free */
	synchronize_rcu();
	__mptcp_pm_release_addr_entry(entry);

	return ret;
}

/* Build removal lists for every announced address and every subflow whose
 * source address matches an entry of @rm_list, then remove them and reset
 * the per-msk endpoint accounting under the PM lock.
 */
static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk,
					      struct list_head *rm_list)
{
	struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
	struct mptcp_pm_addr_entry *entry;

	list_for_each_entry(entry, rm_list, list) {
		if (slist.nr < MPTCP_RM_IDS_MAX &&
		    mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
			slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);

		if (alist.nr < MPTCP_RM_IDS_MAX &&
		    mptcp_remove_anno_list_by_saddr(msk,
&entry->addr)) 1238 alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1239 } 1240 1241 spin_lock_bh(&msk->pm.lock); 1242 if (alist.nr) { 1243 msk->pm.add_addr_signaled -= alist.nr; 1244 mptcp_pm_remove_addr(msk, &alist); 1245 } 1246 if (slist.nr) 1247 mptcp_pm_rm_subflow(msk, &slist); 1248 /* Reset counters: maybe some subflows have been removed before */ 1249 bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1250 msk->pm.local_addr_used = 0; 1251 spin_unlock_bh(&msk->pm.lock); 1252 } 1253 1254 static void mptcp_nl_flush_addrs_list(struct net *net, 1255 struct list_head *rm_list) 1256 { 1257 long s_slot = 0, s_num = 0; 1258 struct mptcp_sock *msk; 1259 1260 if (list_empty(rm_list)) 1261 return; 1262 1263 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1264 struct sock *sk = (struct sock *)msk; 1265 1266 if (!mptcp_pm_is_userspace(msk)) { 1267 lock_sock(sk); 1268 mptcp_pm_flush_addrs_and_subflows(msk, rm_list); 1269 release_sock(sk); 1270 } 1271 1272 sock_put(sk); 1273 cond_resched(); 1274 } 1275 } 1276 1277 /* caller must ensure the RCU grace period is already elapsed */ 1278 static void __flush_addrs(struct list_head *list) 1279 { 1280 while (!list_empty(list)) { 1281 struct mptcp_pm_addr_entry *cur; 1282 1283 cur = list_entry(list->next, 1284 struct mptcp_pm_addr_entry, list); 1285 list_del_rcu(&cur->list); 1286 __mptcp_pm_release_addr_entry(cur); 1287 } 1288 } 1289 1290 static void __reset_counters(struct pm_nl_pernet *pernet) 1291 { 1292 WRITE_ONCE(pernet->endp_signal_max, 0); 1293 WRITE_ONCE(pernet->endp_subflow_max, 0); 1294 WRITE_ONCE(pernet->endp_laminar_max, 0); 1295 pernet->endpoints = 0; 1296 } 1297 1298 int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) 1299 { 1300 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1301 struct list_head free_list; 1302 1303 spin_lock_bh(&pernet->lock); 1304 free_list = pernet->endp_list; 1305 INIT_LIST_HEAD_RCU(&pernet->endp_list); 1306 
	__reset_counters(pernet);
	pernet->next_id = 1;
	bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
	spin_unlock_bh(&pernet->lock);

	/* List was empty: the stolen head still points back at the pernet list */
	if (free_list.next == &pernet->endp_list)
		return 0;

	/* Wait for RCU readers of the old list before touching the entries */
	synchronize_rcu();

	/* Adjust the pointers to free_list instead of pernet->endp_list */
	free_list.prev->next = &free_list;
	free_list.next->prev = &free_list;

	mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list);
	__flush_addrs(&free_list);
	return 0;
}

/* Copy the endpoint with the given @id into @addr.
 * Returns 0 on success, -EINVAL if no such endpoint exists.
 */
int mptcp_pm_nl_get_addr(u8 id, struct mptcp_pm_addr_entry *addr,
			 struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct mptcp_pm_addr_entry *entry;
	int ret = -EINVAL;

	rcu_read_lock();
	entry = __lookup_addr_by_id(pernet, id);
	if (entry) {
		*addr = *entry;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}

/* Netlink dump callback: emit endpoints with an ID greater than the one
 * saved in cb->args[0], updating it so the dump can resume.
 */
int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
			  struct netlink_callback *cb)
{
	struct net *net = sock_net(msg->sk);
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	int id = cb->args[0];
	int i;

	pernet = pm_nl_get_pernet(net);

	rcu_read_lock();
	for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
		if (test_bit(i, pernet->id_bitmap)) {
			entry = __lookup_addr_by_id(pernet, i);
			/* Bitmap and list out of sync: stop the dump */
			if (!entry)
				break;

			if (entry->addr.id <= id)
				continue;

			/* Message full: resume from @id on the next call */
			if (mptcp_pm_genl_fill_addr(msg, cb, entry) < 0)
				break;

			id = entry->addr.id;
		}
	}
	rcu_read_unlock();

	cb->args[0] = id;
	return msg->len;
}

/* Read the u32 limit attribute @id into @limit, if present.
 * Returns 0 when absent or valid, -EINVAL when above MPTCP_PM_ADDR_MAX.
 */
static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
	struct nlattr *attr = info->attrs[id];

	if (!attr)
		return 0;

	*limit = nla_get_u32(attr);
	if (*limit > MPTCP_PM_ADDR_MAX) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack, attr,
					"limit greater than maximum (%u)",
					MPTCP_PM_ADDR_MAX);
		return -EINVAL;
	}
	return 0;
}

/* Set the PM limits: netlink SET_LIMITS doit handler.
 * Both limits are validated before either is published, so a rejected
 * request leaves the previous values untouched.
 */
int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	unsigned int rcv_addrs, subflows;
	int ret;

	spin_lock_bh(&pernet->lock);
	rcv_addrs = pernet->limit_add_addr_accepted;
	ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
	if (ret)
		goto unlock;

	subflows = pernet->limit_extra_subflows;
	ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
	if (ret)
		goto unlock;

	WRITE_ONCE(pernet->limit_add_addr_accepted, rcv_addrs);
	WRITE_ONCE(pernet->limit_extra_subflows, subflows);

unlock:
	spin_unlock_bh(&pernet->lock);
	return ret;
}

/* Report the PM limits: netlink GET_LIMITS doit handler */
int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
	struct sk_buff *msg;
	void *reply;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
				  MPTCP_PM_CMD_GET_LIMITS);
	if (!reply)
		goto fail;

	if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
			READ_ONCE(pernet->limit_add_addr_accepted)))
		goto fail;

	if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
			READ_ONCE(pernet->limit_extra_subflows)))
		goto fail;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

fail:
	GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
	nlmsg_free(msg);
	return -EMSGSIZE;
}

/* FULLMESH flag toggled on @addr: tear down its subflow(s), release the
 * endpoint ID, then let the PM recreate subflows with the new flag.
 */
static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
				 struct mptcp_addr_info *addr)
{
	struct mptcp_rm_list list = { .nr = 0 };

	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_rm_subflow(msk, &list);
	__mark_subflow_endp_available(msk, list.ids[0]);
	mptcp_pm_create_subflow_or_signal_addr(msk);
	spin_unlock_bh(&msk->pm.lock);
}

/* Propagate endpoint flag changes to every in-kernel-PM msk of @net:
 * BACKUP toggles send an MP_PRIO ack, FULLMESH toggles recreate subflows.
 */
static void mptcp_pm_nl_set_flags_all(struct net *net,
				      struct mptcp_pm_addr_entry *local,
				      u8 changed)
{
	u8 is_subflow = !!(local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW);
	u8 bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	long s_slot = 0, s_num = 0;
	struct mptcp_sock *msk;

	/* Only a FULLMESH change, but no SUBFLOW flag: nothing to update */
	if (changed == MPTCP_PM_ADDR_FLAG_FULLMESH && !is_subflow)
		return;

	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
		struct sock *sk = (struct sock *)msk;

		if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
			goto next;

		lock_sock(sk);
		if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
			mptcp_pm_mp_prio_send_ack(msk, &local->addr, NULL, bkup);
		/* Subflows will only be recreated if the SUBFLOW flag is set */
		if (is_subflow && (changed & MPTCP_PM_ADDR_FLAG_FULLMESH))
			mptcp_pm_nl_fullmesh(msk, &local->addr);
		release_sock(sk);

next:
		sock_put(sk);
		cond_resched();
	}
}

/* Change the flags of an existing endpoint, looked up either by ID (when
 * the address family is unspecified) or by address. Only the BACKUP and
 * FULLMESH bits can be modified; @local is updated to the resulting entry.
 */
int mptcp_pm_nl_set_flags(struct mptcp_pm_addr_entry *local,
			  struct genl_info *info)
{
	struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
	u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
			   MPTCP_PM_ADDR_FLAG_FULLMESH;
	struct net *net = genl_info_net(info);
	struct mptcp_pm_addr_entry *entry;
	struct pm_nl_pernet *pernet;
	u8 lookup_by_id = 0;

	pernet = pm_nl_get_pernet(net);

	if (local->addr.family == AF_UNSPEC) {
		lookup_by_id = 1;
		if (!local->addr.id) {
			NL_SET_ERR_MSG_ATTR(info->extack, attr,
					    "missing address ID");
			return -EOPNOTSUPP;
		}
	}

	spin_lock_bh(&pernet->lock);
	entry = lookup_by_id ?
		__lookup_addr_by_id(pernet, local->addr.id) :
		__lookup_addr(pernet, &local->addr);
	if (!entry) {
		spin_unlock_bh(&pernet->lock);
		NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found");
		return -EINVAL;
	}
	/* FULLMESH cannot be combined with SIGNAL or IMPLICIT endpoints */
	if ((local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
	    (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL |
			     MPTCP_PM_ADDR_FLAG_IMPLICIT))) {
		spin_unlock_bh(&pernet->lock);
		NL_SET_ERR_MSG_ATTR(info->extack, attr, "invalid addr flags");
		return -EINVAL;
	}

	/* Only apply the maskable bits; report the full entry back via @local */
	changed = (local->flags ^ entry->flags) & mask;
	entry->flags = (entry->flags & ~mask) | (local->flags & mask);
	*local = *entry;

	/* Keep the pernet fullmesh endpoint counter in sync with the toggle */
	if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) {
		u8 addr_max = pernet->endp_fullmesh_max;

		if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)
			addr_max++;
		else
			addr_max--;

		WRITE_ONCE(pernet->endp_fullmesh_max, addr_max);
	}

	spin_unlock_bh(&pernet->lock);

	mptcp_pm_nl_set_flags_all(net, local, changed);
	return 0;
}

/* Return false, and clear the work_pending flag, when nothing more can be
 * done: either the extra-subflow limit has been reached, or no endpoint ID
 * is both configured (pernet id_bitmap) and still available on this msk.
 */
bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

	if (msk->pm.extra_subflows == mptcp_pm_get_limit_extra_subflows(msk) ||
	    (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
			       MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
		WRITE_ONCE(msk->pm.work_pending, false);
		return false;
	}
	return true;
}

/* Called under PM lock */
void __mptcp_pm_kernel_worker(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	/* Each status bit is consumed (cleared) before being handled */
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status &
	    BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}
}

/* Per-netns init: set up the endpoint list, lock and default limits */
static int __net_init pm_nl_init_net(struct net *net)
{
	struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

	INIT_LIST_HEAD_RCU(&pernet->endp_list);

	/* Cit. 2 subflows ought to be enough for anybody. */
	pernet->limit_extra_subflows = 2;
	pernet->next_id = 1;
	spin_lock_init(&pernet->lock);

	/* No need to initialize other pernet fields, the struct is zeroed at
	 * allocation time.
	 */

	return 0;
}

/* Per-netns batched exit: release every remaining endpoint entry */
static void __net_exit pm_nl_exit_net(struct list_head *net_list)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list) {
		struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

		/* net is removed from namespace list, can't race with
		 * other modifiers, also netns core already waited for a
		 * RCU grace period.
		 */
		__flush_addrs(&pernet->endp_list);
	}
}

static struct pernet_operations mptcp_pm_pernet_ops = {
	.init = pm_nl_init_net,
	.exit_batch = pm_nl_exit_net,
	.id = &pm_nl_pernet_id,
	.size = sizeof(struct pm_nl_pernet),
};

/* The default in-kernel MPTCP path manager */
struct mptcp_pm_ops mptcp_pm_kernel = {
	.name = "kernel",
	.owner = THIS_MODULE,
};

/* Boot-time registration of the pernet subsystem and the "kernel" PM */
void __init mptcp_pm_kernel_register(void)
{
	if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
		panic("Failed to register MPTCP PM pernet subsystem.\n");

	mptcp_pm_register(&mptcp_pm_kernel);
}