// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		MPTCP_INC_STATS(sock_net((struct sock *)msk),
				echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		MPTCP_ADD_STATS(sock_net((struct sock *)msk),
				MPTCP_MIB_RMADDRTXDROP, rm_list->nr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			spin_lock_bh(&pm->lock);
			pm->subflows++;
			spin_unlock_bh(&pm->lock);
			return true;
		}
		return false;
	}

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work item
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}
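
/* The status bits set via mptcp_pm_schedule_work() are consumed by the msk
 * worker, which re-acquires the pm lock and clears each bit as it serves the
 * corresponding event; until then, scheduling the same event again is a no-op.
 */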

void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established();
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = subflow->request_join || subflow->mp_join;
	if (mptcp_pm_is_userspace(msk)) {
		if (update_subflows) {
			spin_lock_bh(&pm->lock);
			pm->subflows--;
			spin_unlock_bh(&pm->lock);
		}
		return;
	}

	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
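
/* Handle a received ADD_ADDR: the in-kernel PM echoes the address and, when
 * it may still be accepted, defers the MP_JOIN attempts to the worker; the
 * userspace PM only echoes, leaving any join decision to user space.
 */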

void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup)
		subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu", fail_seq);

	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!subflow->fail_tout) {
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		pr_debug("MP_FAIL response received");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}
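
/* The two _signal() helpers below run from the TCP option writing path: they
 * re-check the pending bits under the pm lock and leave a bit set when the
 * suboption does not fit in the remaining option space, so that a later
 * packet can carry it.
 */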

/* path manager helpers */

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	bool ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop every other option for a pure-ack ADD_ADDR: this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	bool ret = false;
	u8 rm_addr;
	int len;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;
	struct mptcp_addr_info msk_local;

	if (WARN_ON_ONCE(!msk))
		return -1;

	/* The 0 ID mapping is defined by the first subflow, copied into the msk
	 * addr
	 */
	mptcp_local_address((struct sock_common *)msk, &msk_local);
	mptcp_local_address((struct sock_common *)skc, &skc_local);
	if (mptcp_addresses_equal(&msk_local, &skc_local, false))
		return 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
	return mptcp_pm_nl_get_local_id(msk, &skc_local);
}

bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;

	mptcp_local_address((struct sock_common *)skc, &skc_local);

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_is_backup(msk, &skc_local);

	return mptcp_pm_nl_is_backup(msk, &skc_local);
}

int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
					 u8 *flags, int *ifindex)
{
	*flags = 0;
	*ifindex = 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
	return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}
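
/* For the generic netlink handlers below, the presence of the
 * MPTCP_PM_ATTR_TOKEN attribute in the request selects the userspace path
 * manager; requests without a token are served by the in-kernel PM.
 */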

int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[MPTCP_PM_ATTR_TOKEN])
		return mptcp_userspace_pm_get_addr(skb, info);
	return mptcp_pm_nl_get_addr(skb, info);
}

int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);

	if (info->attrs[MPTCP_PM_ATTR_TOKEN])
		return mptcp_userspace_pm_dump_addr(msg, cb);
	return mptcp_pm_nl_dump_addr(msg, cb);
}

int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[MPTCP_PM_ATTR_TOKEN])
		return mptcp_userspace_pm_set_flags(skb, info);
	return mptcp_pm_nl_set_flags(skb, info);
}

void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

/* if sk is an ipv4 or ipv6_only socket, allow only same-family local and
 * remote addresses; otherwise allow any matching local/remote pair
 */
bool mptcp_pm_addr_families_match(const struct sock *sk,
				  const struct mptcp_addr_info *loc,
				  const struct mptcp_addr_info *rem)
{
	bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

	if (mptcp_is_v4)
		return loc_is_v4 && rem_is_v4;

	if (ipv6_only_sock(sk))
		return !loc_is_v4 && !rem_is_v4;

	return loc_is_v4 == rem_is_v4;
#else
	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}

void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
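	/* mptcp_pm_data_reset() is also invoked on disconnect to bring the PM
	 * back to this initial state, hence it is kept separate from _init()
	 */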
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}