1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2017 - 2019, Intel Corporation. 5 */ 6 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <crypto/sha2.h> 11 #include <net/tcp.h> 12 #include <net/mptcp.h> 13 #include "protocol.h" 14 #include "mib.h" 15 16 #include <trace/events/mptcp.h> 17 18 static bool mptcp_cap_flag_sha256(u8 flags) 19 { 20 return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256; 21 } 22 23 static void mptcp_parse_option(const struct sk_buff *skb, 24 const unsigned char *ptr, int opsize, 25 struct mptcp_options_received *mp_opt) 26 { 27 u8 subtype = *ptr >> 4; 28 int expected_opsize; 29 u8 version; 30 u8 flags; 31 u8 i; 32 33 switch (subtype) { 34 case MPTCPOPT_MP_CAPABLE: 35 /* strict size checking */ 36 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 37 if (skb->len > tcp_hdr(skb)->doff << 2) 38 expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA; 39 else 40 expected_opsize = TCPOLEN_MPTCP_MPC_ACK; 41 } else { 42 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) 43 expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK; 44 else 45 expected_opsize = TCPOLEN_MPTCP_MPC_SYN; 46 } 47 48 /* Cfr RFC 8684 Section 3.3.0: 49 * If a checksum is present but its use had 50 * not been negotiated in the MP_CAPABLE handshake, the receiver MUST 51 * close the subflow with a RST, as it is not behaving as negotiated. 52 * If a checksum is not present when its use has been negotiated, the 53 * receiver MUST close the subflow with a RST, as it is considered 54 * broken 55 * We parse even option with mismatching csum presence, so that 56 * later in subflow_data_ready we can trigger the reset. 57 */ 58 if (opsize != expected_opsize && 59 (expected_opsize != TCPOLEN_MPTCP_MPC_ACK_DATA || 60 opsize != TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM)) 61 break; 62 63 /* try to be gentle vs future versions on the initial syn */ 64 version = *ptr++ & MPTCP_VERSION_MASK; 65 if (opsize != TCPOLEN_MPTCP_MPC_SYN) { 66 if (version != MPTCP_SUPPORTED_VERSION) 67 break; 68 } else if (version < MPTCP_SUPPORTED_VERSION) { 69 break; 70 } 71 72 flags = *ptr++; 73 if (!mptcp_cap_flag_sha256(flags) || 74 (flags & MPTCP_CAP_EXTENSIBILITY)) 75 break; 76 77 /* RFC 6824, Section 3.1: 78 * "For the Checksum Required bit (labeled "A"), if either 79 * host requires the use of checksums, checksums MUST be used. 80 * In other words, the only way for checksums not to be used 81 * is if both hosts in their SYNs set A=0." 82 */ 83 if (flags & MPTCP_CAP_CHECKSUM_REQD) 84 mp_opt->csum_reqd = 1; 85 86 if (flags & MPTCP_CAP_DENY_JOIN_ID0) 87 mp_opt->deny_join_id0 = 1; 88 89 mp_opt->mp_capable = 1; 90 if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) { 91 mp_opt->sndr_key = get_unaligned_be64(ptr); 92 ptr += 8; 93 } 94 if (opsize >= TCPOLEN_MPTCP_MPC_ACK) { 95 mp_opt->rcvr_key = get_unaligned_be64(ptr); 96 ptr += 8; 97 } 98 if (opsize >= TCPOLEN_MPTCP_MPC_ACK_DATA) { 99 /* Section 3.1.: 100 * "the data parameters in a MP_CAPABLE are semantically 101 * equivalent to those in a DSS option and can be used 102 * interchangeably." 103 */ 104 mp_opt->dss = 1; 105 mp_opt->use_map = 1; 106 mp_opt->mpc_map = 1; 107 mp_opt->data_len = get_unaligned_be16(ptr); 108 ptr += 2; 109 } 110 if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) { 111 mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr); 112 mp_opt->csum_reqd = 1; 113 ptr += 2; 114 } 115 pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u", 116 version, flags, opsize, mp_opt->sndr_key, 117 mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum); 118 break; 119 120 case MPTCPOPT_MP_JOIN: 121 mp_opt->mp_join = 1; 122 if (opsize == TCPOLEN_MPTCP_MPJ_SYN) { 123 mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; 124 mp_opt->join_id = *ptr++; 125 mp_opt->token = get_unaligned_be32(ptr); 126 ptr += 4; 127 mp_opt->nonce = get_unaligned_be32(ptr); 128 ptr += 4; 129 pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u", 130 mp_opt->backup, mp_opt->join_id, 131 mp_opt->token, mp_opt->nonce); 132 } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) { 133 mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; 134 mp_opt->join_id = *ptr++; 135 mp_opt->thmac = get_unaligned_be64(ptr); 136 ptr += 8; 137 mp_opt->nonce = get_unaligned_be32(ptr); 138 ptr += 4; 139 pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u", 140 mp_opt->backup, mp_opt->join_id, 141 mp_opt->thmac, mp_opt->nonce); 142 } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) { 143 ptr += 2; 144 memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN); 145 pr_debug("MP_JOIN hmac"); 146 } else { 147 mp_opt->mp_join = 0; 148 } 149 break; 150 151 case MPTCPOPT_DSS: 152 pr_debug("DSS"); 153 ptr++; 154 155 /* we must clear 'mpc_map' be able to detect MP_CAPABLE 156 * map vs DSS map in mptcp_incoming_options(), and reconstruct 157 * map info accordingly 158 */ 159 mp_opt->mpc_map = 0; 160 flags = (*ptr++) & MPTCP_DSS_FLAG_MASK; 161 mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0; 162 mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0; 163 mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0; 164 mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0; 165 mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK); 166 167 pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d", 168 mp_opt->data_fin, mp_opt->dsn64, 169 mp_opt->use_map, mp_opt->ack64, 170 mp_opt->use_ack); 171 172 expected_opsize = TCPOLEN_MPTCP_DSS_BASE; 173 174 if (mp_opt->use_ack) { 175 if (mp_opt->ack64) 176 expected_opsize += TCPOLEN_MPTCP_DSS_ACK64; 177 else 178 expected_opsize += TCPOLEN_MPTCP_DSS_ACK32; 179 } 180 181 if (mp_opt->use_map) { 182 if (mp_opt->dsn64) 183 expected_opsize += TCPOLEN_MPTCP_DSS_MAP64; 184 else 185 expected_opsize += TCPOLEN_MPTCP_DSS_MAP32; 186 } 187 188 /* Always parse any csum presence combination, we will enforce 189 * RFC 8684 Section 3.3.0 checks later in subflow_data_ready 190 */ 191 if (opsize != expected_opsize && 192 opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) 193 break; 194 195 mp_opt->dss = 1; 196 197 if (mp_opt->use_ack) { 198 if (mp_opt->ack64) { 199 mp_opt->data_ack = get_unaligned_be64(ptr); 200 ptr += 8; 201 } else { 202 mp_opt->data_ack = get_unaligned_be32(ptr); 203 ptr += 4; 204 } 205 206 pr_debug("data_ack=%llu", mp_opt->data_ack); 207 } 208 209 if (mp_opt->use_map) { 210 if (mp_opt->dsn64) { 211 mp_opt->data_seq = get_unaligned_be64(ptr); 212 ptr += 8; 213 } else { 214 mp_opt->data_seq = get_unaligned_be32(ptr); 215 ptr += 4; 216 } 217 218 mp_opt->subflow_seq = get_unaligned_be32(ptr); 219 ptr += 4; 220 221 mp_opt->data_len = get_unaligned_be16(ptr); 222 ptr += 2; 223 224 if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) { 225 mp_opt->csum_reqd = 1; 226 mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr); 227 ptr += 2; 228 } 229 230 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", 231 mp_opt->data_seq, mp_opt->subflow_seq, 232 mp_opt->data_len, mp_opt->csum_reqd, mp_opt->csum); 233 } 234 235 break; 236 237 case MPTCPOPT_ADD_ADDR: 238 mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO; 239 if (!mp_opt->echo) { 240 if (opsize == TCPOLEN_MPTCP_ADD_ADDR || 241 opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT) 242 mp_opt->addr.family = AF_INET; 243 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 244 else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 || 245 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT) 246 mp_opt->addr.family = AF_INET6; 247 #endif 248 else 249 break; 250 } else { 251 if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE || 252 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) 253 mp_opt->addr.family = AF_INET; 254 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 255 else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE || 256 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) 257 mp_opt->addr.family = AF_INET6; 258 #endif 259 else 260 break; 261 } 262 263 mp_opt->add_addr = 1; 264 mp_opt->addr.id = *ptr++; 265 if (mp_opt->addr.family == AF_INET) { 266 memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4); 267 ptr += 4; 268 if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT || 269 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) { 270 mp_opt->addr.port = htons(get_unaligned_be16(ptr)); 271 ptr += 2; 272 } 273 } 274 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 275 else { 276 memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16); 277 ptr += 16; 278 if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT || 279 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) { 280 mp_opt->addr.port = htons(get_unaligned_be16(ptr)); 281 ptr += 2; 282 } 283 } 284 #endif 285 if (!mp_opt->echo) { 286 mp_opt->ahmac = get_unaligned_be64(ptr); 287 ptr += 8; 288 } 289 pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d", 290 (mp_opt->addr.family == AF_INET6) ? "6" : "", 291 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port)); 292 break; 293 294 case MPTCPOPT_RM_ADDR: 295 if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 || 296 opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX) 297 break; 298 299 ptr++; 300 301 mp_opt->rm_addr = 1; 302 mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE; 303 for (i = 0; i < mp_opt->rm_list.nr; i++) 304 mp_opt->rm_list.ids[i] = *ptr++; 305 pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr); 306 break; 307 308 case MPTCPOPT_MP_PRIO: 309 if (opsize != TCPOLEN_MPTCP_PRIO) 310 break; 311 312 mp_opt->mp_prio = 1; 313 mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP; 314 pr_debug("MP_PRIO: prio=%d", mp_opt->backup); 315 break; 316 317 case MPTCPOPT_MP_FASTCLOSE: 318 if (opsize != TCPOLEN_MPTCP_FASTCLOSE) 319 break; 320 321 ptr += 2; 322 mp_opt->rcvr_key = get_unaligned_be64(ptr); 323 ptr += 8; 324 mp_opt->fastclose = 1; 325 break; 326 327 case MPTCPOPT_RST: 328 if (opsize != TCPOLEN_MPTCP_RST) 329 break; 330 331 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) 332 break; 333 mp_opt->reset = 1; 334 flags = *ptr++; 335 mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT; 336 mp_opt->reset_reason = *ptr; 337 break; 338 339 case MPTCPOPT_MP_FAIL: 340 if (opsize != TCPOLEN_MPTCP_FAIL) 341 break; 342 343 ptr += 2; 344 mp_opt->mp_fail = 1; 345 mp_opt->fail_seq = get_unaligned_be64(ptr); 346 pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq); 347 break; 348 349 default: 350 break; 351 } 352 } 353 354 void mptcp_get_options(const struct sock *sk, 355 const struct sk_buff *skb, 356 struct mptcp_options_received *mp_opt) 357 { 358 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 359 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 360 const struct tcphdr *th = tcp_hdr(skb); 361 const unsigned char *ptr; 362 int length; 363 364 /* initialize option status */ 365 mp_opt->mp_capable = 0; 366 mp_opt->mp_join = 0; 367 mp_opt->add_addr = 0; 368 mp_opt->ahmac = 0; 369 mp_opt->fastclose = 0; 370 mp_opt->addr.port = 0; 371 mp_opt->rm_addr = 0; 372 mp_opt->dss = 0; 373 mp_opt->mp_prio = 0; 374 mp_opt->reset = 0; 375 mp_opt->csum_reqd = READ_ONCE(msk->csum_enabled); 376 mp_opt->deny_join_id0 = 0; 377 mp_opt->mp_fail = 0; 378 379 length = (th->doff * 4) - sizeof(struct tcphdr); 380 ptr = (const unsigned char *)(th + 1); 381 382 while (length > 0) { 383 int opcode = *ptr++; 384 int opsize; 385 386 switch (opcode) { 387 case TCPOPT_EOL: 388 return; 389 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 390 length--; 391 continue; 392 default: 393 if (length < 2) 394 return; 395 opsize = *ptr++; 396 if (opsize < 2) /* "silly options" */ 397 return; 398 if (opsize > length) 399 return; /* don't parse partial options */ 400 if (opcode == TCPOPT_MPTCP) 401 mptcp_parse_option(skb, ptr, opsize, mp_opt); 402 ptr += opsize - 2; 403 length -= opsize; 404 } 405 } 406 } 407 408 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, 409 unsigned int *size, struct mptcp_out_options *opts) 410 { 411 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 412 413 /* we will use snd_isn to detect first pkt [re]transmission 414 * in mptcp_established_options_mp() 415 */ 416 subflow->snd_isn = TCP_SKB_CB(skb)->end_seq; 417 if (subflow->request_mptcp) { 418 opts->suboptions = OPTION_MPTCP_MPC_SYN; 419 opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk)); 420 opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk)); 421 *size = TCPOLEN_MPTCP_MPC_SYN; 422 return true; 423 } else if (subflow->request_join) { 424 pr_debug("remote_token=%u, nonce=%u", subflow->remote_token, 425 subflow->local_nonce); 426 opts->suboptions = OPTION_MPTCP_MPJ_SYN; 427 opts->join_id = subflow->local_id; 428 opts->token = subflow->remote_token; 429 opts->nonce = subflow->local_nonce; 430 opts->backup = subflow->request_bkup; 431 *size = TCPOLEN_MPTCP_MPJ_SYN; 432 return true; 433 } 434 return false; 435 } 436 437 /* MP_JOIN client subflow must wait for 4th ack before sending any data: 438 * TCP can't schedule delack timer before the subflow is fully established. 439 * MPTCP uses the delack timer to do 3rd ack retransmissions 440 */ 441 static void schedule_3rdack_retransmission(struct sock *sk) 442 { 443 struct inet_connection_sock *icsk = inet_csk(sk); 444 struct tcp_sock *tp = tcp_sk(sk); 445 unsigned long timeout; 446 447 /* reschedule with a timeout above RTT, as we must look only for drop */ 448 if (tp->srtt_us) 449 timeout = tp->srtt_us << 1; 450 else 451 timeout = TCP_TIMEOUT_INIT; 452 453 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); 454 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 455 icsk->icsk_ack.timeout = timeout; 456 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 457 } 458 459 static void clear_3rdack_retransmission(struct sock *sk) 460 { 461 struct inet_connection_sock *icsk = inet_csk(sk); 462 463 sk_stop_timer(sk, &icsk->icsk_delack_timer); 464 icsk->icsk_ack.timeout = 0; 465 icsk->icsk_ack.ato = 0; 466 icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER); 467 } 468 469 static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, 470 bool snd_data_fin_enable, 471 unsigned int *size, 472 unsigned int remaining, 473 struct mptcp_out_options *opts) 474 { 475 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 476 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 477 struct mptcp_ext *mpext; 478 unsigned int data_len; 479 u8 len; 480 481 /* When skb is not available, we better over-estimate the emitted 482 * options len. A full DSS option (28 bytes) is longer than 483 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so 484 * tell the caller to defer the estimate to 485 * mptcp_established_options_dss(), which will reserve enough space. 486 */ 487 if (!skb) 488 return false; 489 490 /* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */ 491 if (subflow->fully_established || snd_data_fin_enable || 492 subflow->snd_isn != TCP_SKB_CB(skb)->seq || 493 sk->sk_state != TCP_ESTABLISHED) 494 return false; 495 496 if (subflow->mp_capable) { 497 mpext = mptcp_get_ext(skb); 498 data_len = mpext ? mpext->data_len : 0; 499 500 /* we will check ext_copy.data_len in mptcp_write_options() to 501 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and 502 * TCPOLEN_MPTCP_MPC_ACK 503 */ 504 opts->ext_copy.data_len = data_len; 505 opts->suboptions = OPTION_MPTCP_MPC_ACK; 506 opts->sndr_key = subflow->local_key; 507 opts->rcvr_key = subflow->remote_key; 508 opts->csum_reqd = READ_ONCE(msk->csum_enabled); 509 opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk)); 510 511 /* Section 3.1. 512 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK 513 * packets that start the first subflow of an MPTCP connection, 514 * as well as the first packet that carries data 515 */ 516 if (data_len > 0) { 517 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 518 if (opts->csum_reqd) { 519 /* we need to propagate more info to csum the pseudo hdr */ 520 opts->ext_copy.data_seq = mpext->data_seq; 521 opts->ext_copy.subflow_seq = mpext->subflow_seq; 522 opts->ext_copy.csum = mpext->csum; 523 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 524 } 525 *size = ALIGN(len, 4); 526 } else { 527 *size = TCPOLEN_MPTCP_MPC_ACK; 528 } 529 530 pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d", 531 subflow, subflow->local_key, subflow->remote_key, 532 data_len); 533 534 return true; 535 } else if (subflow->mp_join) { 536 opts->suboptions = OPTION_MPTCP_MPJ_ACK; 537 memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN); 538 *size = TCPOLEN_MPTCP_MPJ_ACK; 539 pr_debug("subflow=%p", subflow); 540 541 schedule_3rdack_retransmission(sk); 542 return true; 543 } 544 return false; 545 } 546 547 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, 548 struct sk_buff *skb, struct mptcp_ext *ext) 549 { 550 /* The write_seq value has already been incremented, so the actual 551 * sequence number for the DATA_FIN is one less. 552 */ 553 u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1; 554 555 if (!ext->use_map || !skb->len) { 556 /* RFC6824 requires a DSS mapping with specific values 557 * if DATA_FIN is set but no data payload is mapped 558 */ 559 ext->data_fin = 1; 560 ext->use_map = 1; 561 ext->dsn64 = 1; 562 ext->data_seq = data_fin_tx_seq; 563 ext->subflow_seq = 0; 564 ext->data_len = 1; 565 } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) { 566 /* If there's an existing DSS mapping and it is the 567 * final mapping, DATA_FIN consumes 1 additional byte of 568 * mapping space. 569 */ 570 ext->data_fin = 1; 571 ext->data_len++; 572 } 573 } 574 575 static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, 576 bool snd_data_fin_enable, 577 unsigned int *size, 578 unsigned int remaining, 579 struct mptcp_out_options *opts) 580 { 581 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 582 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 583 unsigned int dss_size = 0; 584 struct mptcp_ext *mpext; 585 unsigned int ack_size; 586 bool ret = false; 587 u64 ack_seq; 588 589 opts->csum_reqd = READ_ONCE(msk->csum_enabled); 590 mpext = skb ? mptcp_get_ext(skb) : NULL; 591 592 if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) { 593 unsigned int map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64; 594 595 if (mpext) { 596 if (opts->csum_reqd) 597 map_size += TCPOLEN_MPTCP_DSS_CHECKSUM; 598 599 opts->ext_copy = *mpext; 600 } 601 602 remaining -= map_size; 603 dss_size = map_size; 604 if (skb && snd_data_fin_enable) 605 mptcp_write_data_fin(subflow, skb, &opts->ext_copy); 606 opts->suboptions = OPTION_MPTCP_DSS; 607 ret = true; 608 } 609 610 /* passive sockets msk will set the 'can_ack' after accept(), even 611 * if the first subflow may have the already the remote key handy 612 */ 613 opts->ext_copy.use_ack = 0; 614 if (!READ_ONCE(msk->can_ack)) { 615 *size = ALIGN(dss_size, 4); 616 return ret; 617 } 618 619 ack_seq = READ_ONCE(msk->ack_seq); 620 if (READ_ONCE(msk->use_64bit_ack)) { 621 ack_size = TCPOLEN_MPTCP_DSS_ACK64; 622 opts->ext_copy.data_ack = ack_seq; 623 opts->ext_copy.ack64 = 1; 624 } else { 625 ack_size = TCPOLEN_MPTCP_DSS_ACK32; 626 opts->ext_copy.data_ack32 = (uint32_t)ack_seq; 627 opts->ext_copy.ack64 = 0; 628 } 629 opts->ext_copy.use_ack = 1; 630 opts->suboptions = OPTION_MPTCP_DSS; 631 WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk)); 632 633 /* Add kind/length/subtype/flag overhead if mapping is not populated */ 634 if (dss_size == 0) 635 ack_size += TCPOLEN_MPTCP_DSS_BASE; 636 637 dss_size += ack_size; 638 639 *size = ALIGN(dss_size, 4); 640 return true; 641 } 642 643 static u64 add_addr_generate_hmac(u64 key1, u64 key2, 644 struct mptcp_addr_info *addr) 645 { 646 u16 port = ntohs(addr->port); 647 u8 hmac[SHA256_DIGEST_SIZE]; 648 u8 msg[19]; 649 int i = 0; 650 651 msg[i++] = addr->id; 652 if (addr->family == AF_INET) { 653 memcpy(&msg[i], &addr->addr.s_addr, 4); 654 i += 4; 655 } 656 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 657 else if (addr->family == AF_INET6) { 658 memcpy(&msg[i], &addr->addr6.s6_addr, 16); 659 i += 16; 660 } 661 #endif 662 msg[i++] = port >> 8; 663 msg[i++] = port & 0xFF; 664 665 mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac); 666 667 return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 668 } 669 670 static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb, 671 unsigned int *size, 672 unsigned int remaining, 673 struct mptcp_out_options *opts) 674 { 675 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 676 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 677 bool drop_other_suboptions = false; 678 unsigned int opt_size = *size; 679 bool echo; 680 bool port; 681 int len; 682 683 /* add addr will strip the existing options, be sure to avoid breaking 684 * MPC/MPJ handshakes 685 */ 686 if (!mptcp_pm_should_add_signal(msk) || 687 (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) || 688 !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr, 689 &echo, &port, &drop_other_suboptions)) 690 return false; 691 692 if (drop_other_suboptions) 693 remaining += opt_size; 694 len = mptcp_add_addr_len(opts->addr.family, echo, port); 695 if (remaining < len) 696 return false; 697 698 *size = len; 699 if (drop_other_suboptions) { 700 pr_debug("drop other suboptions"); 701 opts->suboptions = 0; 702 703 /* note that e.g. DSS could have written into the memory 704 * aliased by ahmac, we must reset the field here 705 * to avoid appending the hmac even for ADD_ADDR echo 706 * options 707 */ 708 opts->ahmac = 0; 709 *size -= opt_size; 710 } 711 opts->suboptions |= OPTION_MPTCP_ADD_ADDR; 712 if (!echo) { 713 opts->ahmac = add_addr_generate_hmac(msk->local_key, 714 msk->remote_key, 715 &opts->addr); 716 } 717 pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", 718 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port)); 719 720 return true; 721 } 722 723 static bool mptcp_established_options_rm_addr(struct sock *sk, 724 unsigned int *size, 725 unsigned int remaining, 726 struct mptcp_out_options *opts) 727 { 728 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 729 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 730 struct mptcp_rm_list rm_list; 731 int i, len; 732 733 if (!mptcp_pm_should_rm_signal(msk) || 734 !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list))) 735 return false; 736 737 len = mptcp_rm_addr_len(&rm_list); 738 if (len < 0) 739 return false; 740 if (remaining < len) 741 return false; 742 743 *size = len; 744 opts->suboptions |= OPTION_MPTCP_RM_ADDR; 745 opts->rm_list = rm_list; 746 747 for (i = 0; i < opts->rm_list.nr; i++) 748 pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]); 749 750 return true; 751 } 752 753 static bool mptcp_established_options_mp_prio(struct sock *sk, 754 unsigned int *size, 755 unsigned int remaining, 756 struct mptcp_out_options *opts) 757 { 758 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 759 760 /* can't send MP_PRIO with MPC, as they share the same option space: 761 * 'backup'. Also it makes no sense at all 762 */ 763 if (!subflow->send_mp_prio || 764 ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | 765 OPTION_MPTCP_MPC_ACK) & opts->suboptions)) 766 return false; 767 768 /* account for the trailing 'nop' option */ 769 if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN) 770 return false; 771 772 *size = TCPOLEN_MPTCP_PRIO_ALIGN; 773 opts->suboptions |= OPTION_MPTCP_PRIO; 774 opts->backup = subflow->request_bkup; 775 776 pr_debug("prio=%d", opts->backup); 777 778 return true; 779 } 780 781 static noinline bool mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb, 782 unsigned int *size, 783 unsigned int remaining, 784 struct mptcp_out_options *opts) 785 { 786 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 787 788 if (remaining < TCPOLEN_MPTCP_RST) 789 return false; 790 791 *size = TCPOLEN_MPTCP_RST; 792 opts->suboptions |= OPTION_MPTCP_RST; 793 opts->reset_transient = subflow->reset_transient; 794 opts->reset_reason = subflow->reset_reason; 795 796 return true; 797 } 798 799 static bool mptcp_established_options_mp_fail(struct sock *sk, 800 unsigned int *size, 801 unsigned int remaining, 802 struct mptcp_out_options *opts) 803 { 804 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 805 806 if (likely(!subflow->send_mp_fail)) 807 return false; 808 809 if (remaining < TCPOLEN_MPTCP_FAIL) 810 return false; 811 812 *size = TCPOLEN_MPTCP_FAIL; 813 opts->suboptions |= OPTION_MPTCP_FAIL; 814 opts->fail_seq = subflow->map_seq; 815 816 pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq); 817 818 return true; 819 } 820 821 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, 822 unsigned int *size, unsigned int remaining, 823 struct mptcp_out_options *opts) 824 { 825 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 826 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 827 unsigned int opt_size = 0; 828 bool snd_data_fin; 829 bool ret = false; 830 831 opts->suboptions = 0; 832 833 if (unlikely(__mptcp_check_fallback(msk))) 834 return false; 835 836 if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) { 837 if (mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) { 838 *size += opt_size; 839 remaining -= opt_size; 840 } 841 if (mptcp_established_options_rst(sk, skb, &opt_size, remaining, opts)) { 842 *size += opt_size; 843 remaining -= opt_size; 844 } 845 return true; 846 } 847 848 snd_data_fin = mptcp_data_fin_enabled(msk); 849 if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts)) 850 ret = true; 851 else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts)) { 852 ret = true; 853 if (mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) { 854 *size += opt_size; 855 remaining -= opt_size; 856 return true; 857 } 858 } 859 860 /* we reserved enough space for the above options, and exceeding the 861 * TCP option space would be fatal 862 */ 863 if (WARN_ON_ONCE(opt_size > remaining)) 864 return false; 865 866 *size += opt_size; 867 remaining -= opt_size; 868 if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) { 869 *size += opt_size; 870 remaining -= opt_size; 871 ret = true; 872 } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) { 873 *size += opt_size; 874 remaining -= opt_size; 875 ret = true; 876 } 877 878 if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) { 879 *size += opt_size; 880 remaining -= opt_size; 881 ret = true; 882 } 883 884 return ret; 885 } 886 887 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, 888 struct mptcp_out_options *opts) 889 { 890 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 891 892 if (subflow_req->mp_capable) { 893 opts->suboptions = OPTION_MPTCP_MPC_SYNACK; 894 opts->sndr_key = subflow_req->local_key; 895 opts->csum_reqd = subflow_req->csum_reqd; 896 opts->allow_join_id0 = subflow_req->allow_join_id0; 897 *size = TCPOLEN_MPTCP_MPC_SYNACK; 898 pr_debug("subflow_req=%p, local_key=%llu", 899 subflow_req, subflow_req->local_key); 900 return true; 901 } else if (subflow_req->mp_join) { 902 opts->suboptions = OPTION_MPTCP_MPJ_SYNACK; 903 opts->backup = subflow_req->backup; 904 opts->join_id = subflow_req->local_id; 905 opts->thmac = subflow_req->thmac; 906 opts->nonce = subflow_req->local_nonce; 907 pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u", 908 subflow_req, opts->backup, opts->join_id, 909 opts->thmac, opts->nonce); 910 *size = TCPOLEN_MPTCP_MPJ_SYNACK; 911 return true; 912 } 913 return false; 914 } 915 916 static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, 917 struct mptcp_subflow_context *subflow, 918 struct sk_buff *skb, 919 struct mptcp_options_received *mp_opt) 920 { 921 /* here we can process OoO, in-window pkts, only in-sequence 4th ack 922 * will make the subflow fully established 923 */ 924 if (likely(subflow->fully_established)) { 925 /* on passive sockets, check for 3rd ack retransmission 926 * note that msk is always set by subflow_syn_recv_sock() 927 * for mp_join subflows 928 */ 929 if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 && 930 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && 931 subflow->mp_join && mp_opt->mp_join && 932 READ_ONCE(msk->pm.server_side)) 933 tcp_send_ack(ssk); 934 goto fully_established; 935 } 936 937 /* we must process OoO packets before the first subflow is fully 938 * established. OoO packets are instead a protocol violation 939 * for MP_JOIN subflows as the peer must not send any data 940 * before receiving the forth ack - cfr. RFC 8684 section 3.2. 941 */ 942 if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) { 943 if (subflow->mp_join) 944 goto reset; 945 return subflow->mp_capable; 946 } 947 948 if ((mp_opt->dss && mp_opt->use_ack) || 949 (mp_opt->add_addr && !mp_opt->echo)) { 950 /* subflows are fully established as soon as we get any 951 * additional ack, including ADD_ADDR. 952 */ 953 subflow->fully_established = 1; 954 WRITE_ONCE(msk->fully_established, true); 955 goto fully_established; 956 } 957 958 /* If the first established packet does not contain MP_CAPABLE + data 959 * then fallback to TCP. Fallback scenarios requires a reset for 960 * MP_JOIN subflows. 961 */ 962 if (!mp_opt->mp_capable) { 963 if (subflow->mp_join) 964 goto reset; 965 subflow->mp_capable = 0; 966 pr_fallback(msk); 967 __mptcp_do_fallback(msk); 968 return false; 969 } 970 971 if (mp_opt->deny_join_id0) 972 WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 973 974 if (unlikely(!READ_ONCE(msk->pm.server_side))) 975 pr_warn_once("bogus mpc option on established client sk"); 976 mptcp_subflow_fully_established(subflow, mp_opt); 977 978 fully_established: 979 /* if the subflow is not already linked into the conn_list, we can't 980 * notify the PM: this subflow is still on the listener queue 981 * and the PM possibly acquiring the subflow lock could race with 982 * the listener close 983 */ 984 if (likely(subflow->pm_notified) || list_empty(&subflow->node)) 985 return true; 986 987 subflow->pm_notified = 1; 988 if (subflow->mp_join) { 989 clear_3rdack_retransmission(ssk); 990 mptcp_pm_subflow_established(msk); 991 } else { 992 mptcp_pm_fully_established(msk, ssk, GFP_ATOMIC); 993 } 994 return true; 995 996 reset: 997 mptcp_subflow_reset(ssk); 998 return false; 999 } 1000 1001 u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq) 1002 { 1003 u32 old_seq32, cur_seq32; 1004 1005 old_seq32 = (u32)old_seq; 1006 cur_seq32 = (u32)cur_seq; 1007 cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32; 1008 if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32))) 1009 return cur_seq + (1LL << 32); 1010 1011 /* reverse wrap could happen, too */ 1012 if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32))) 1013 return cur_seq - (1LL << 32); 1014 return cur_seq; 1015 } 1016 1017 static void ack_update_msk(struct mptcp_sock *msk, 1018 struct sock *ssk, 1019 struct mptcp_options_received *mp_opt) 1020 { 1021 u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt); 1022 struct sock *sk = (struct sock *)msk; 1023 u64 old_snd_una; 1024 1025 mptcp_data_lock(sk); 1026 1027 /* avoid ack expansion on update conflict, to reduce the risk of 1028 * wrongly expanding to a future ack sequence number, which is way 1029 * more dangerous than missing an ack 1030 */ 1031 old_snd_una = msk->snd_una; 1032 new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64); 1033 1034 /* ACK for data not even sent yet and even above recovery bound? Ignore.*/ 1035 if (unlikely(after64(new_snd_una, snd_nxt))) { 1036 if (!msk->recovery || after64(new_snd_una, msk->recovery_snd_nxt)) 1037 new_snd_una = old_snd_una; 1038 } 1039 1040 new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd; 1041 1042 if (after64(new_wnd_end, msk->wnd_end)) 1043 msk->wnd_end = new_wnd_end; 1044 1045 /* this assumes mptcp_incoming_options() is invoked after tcp_ack() */ 1046 if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt))) 1047 __mptcp_check_push(sk, ssk); 1048 1049 if (after64(new_snd_una, old_snd_una)) { 1050 msk->snd_una = new_snd_una; 1051 __mptcp_data_acked(sk); 1052 } 1053 mptcp_data_unlock(sk); 1054 1055 trace_ack_update_msk(mp_opt->data_ack, 1056 old_snd_una, new_snd_una, 1057 new_wnd_end, msk->wnd_end); 1058 } 1059 1060 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit) 1061 { 1062 /* Skip if DATA_FIN was already received. 1063 * If updating simultaneously with the recvmsg loop, values 1064 * should match. If they mismatch, the peer is misbehaving and 1065 * we will prefer the most recent information. 1066 */ 1067 if (READ_ONCE(msk->rcv_data_fin)) 1068 return false; 1069 1070 WRITE_ONCE(msk->rcv_data_fin_seq, 1071 mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit)); 1072 WRITE_ONCE(msk->rcv_data_fin, 1); 1073 1074 return true; 1075 } 1076 1077 static bool add_addr_hmac_valid(struct mptcp_sock *msk, 1078 struct mptcp_options_received *mp_opt) 1079 { 1080 u64 hmac = 0; 1081 1082 if (mp_opt->echo) 1083 return true; 1084 1085 hmac = add_addr_generate_hmac(msk->remote_key, 1086 msk->local_key, 1087 &mp_opt->addr); 1088 1089 pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n", 1090 msk, (unsigned long long)hmac, 1091 (unsigned long long)mp_opt->ahmac); 1092 1093 return hmac == mp_opt->ahmac; 1094 } 1095 1096 /* Return false if a subflow has been reset, else return true */ 1097 bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) 1098 { 1099 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 1100 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 1101 struct mptcp_options_received mp_opt; 1102 struct mptcp_ext *mpext; 1103 1104 if (__mptcp_check_fallback(msk)) { 1105 /* Keep it simple and unconditionally trigger send data cleanup and 1106 * pending queue spooling. We will need to acquire the data lock 1107 * for more accurate checks, and once the lock is acquired, such 1108 * helpers are cheap. 1109 */ 1110 mptcp_data_lock(subflow->conn); 1111 if (sk_stream_memory_free(sk)) 1112 __mptcp_check_push(subflow->conn, sk); 1113 __mptcp_data_acked(subflow->conn); 1114 mptcp_data_unlock(subflow->conn); 1115 return true; 1116 } 1117 1118 mptcp_get_options(sk, skb, &mp_opt); 1119 1120 /* The subflow can be in close state only if check_fully_established() 1121 * just sent a reset. If so, tell the caller to ignore the current packet. 1122 */ 1123 if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) 1124 return sk->sk_state != TCP_CLOSE; 1125 1126 if (mp_opt.fastclose && 1127 msk->local_key == mp_opt.rcvr_key) { 1128 WRITE_ONCE(msk->rcv_fastclose, true); 1129 mptcp_schedule_work((struct sock *)msk); 1130 } 1131 1132 if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) { 1133 if (!mp_opt.echo) { 1134 mptcp_pm_add_addr_received(msk, &mp_opt.addr); 1135 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR); 1136 } else { 1137 mptcp_pm_add_addr_echoed(msk, &mp_opt.addr); 1138 mptcp_pm_del_add_timer(msk, &mp_opt.addr, true); 1139 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD); 1140 } 1141 1142 if (mp_opt.addr.port) 1143 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD); 1144 1145 mp_opt.add_addr = 0; 1146 } 1147 1148 if (mp_opt.rm_addr) { 1149 mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list); 1150 mp_opt.rm_addr = 0; 1151 } 1152 1153 if (mp_opt.mp_prio) { 1154 mptcp_pm_mp_prio_received(sk, mp_opt.backup); 1155 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX); 1156 mp_opt.mp_prio = 0; 1157 } 1158 1159 if (mp_opt.mp_fail) { 1160 mptcp_pm_mp_fail_received(sk, mp_opt.fail_seq); 1161 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILRX); 1162 mp_opt.mp_fail = 0; 1163 } 1164 1165 if (mp_opt.reset) { 1166 subflow->reset_seen = 1; 1167 subflow->reset_reason = mp_opt.reset_reason; 1168 subflow->reset_transient = mp_opt.reset_transient; 1169 } 1170 1171 if (!mp_opt.dss) 1172 return true; 1173 1174 /* we can't wait for recvmsg() to update the ack_seq, otherwise 1175 * monodirectional flows will stuck 1176 */ 1177 if (mp_opt.use_ack) 1178 ack_update_msk(msk, sk, &mp_opt); 1179 1180 /* Zero-data-length packets are dropped by the caller and not 1181 * propagated to the MPTCP layer, so the skb extension does not 1182 * need to be allocated or populated. DATA_FIN information, if 1183 * present, needs to be updated here before the skb is freed. 1184 */ 1185 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 1186 if (mp_opt.data_fin && mp_opt.data_len == 1 && 1187 mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) && 1188 schedule_work(&msk->work)) 1189 sock_hold(subflow->conn); 1190 1191 return true; 1192 } 1193 1194 mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 1195 if (!mpext) 1196 return true; 1197 1198 memset(mpext, 0, sizeof(*mpext)); 1199 1200 if (mp_opt.use_map) { 1201 if (mp_opt.mpc_map) { 1202 /* this is an MP_CAPABLE carrying MPTCP data 1203 * we know this map the first chunk of data 1204 */ 1205 mptcp_crypto_key_sha(subflow->remote_key, NULL, 1206 &mpext->data_seq); 1207 mpext->data_seq++; 1208 mpext->subflow_seq = 1; 1209 mpext->dsn64 = 1; 1210 mpext->mpc_map = 1; 1211 mpext->data_fin = 0; 1212 } else { 1213 mpext->data_seq = mp_opt.data_seq; 1214 mpext->subflow_seq = mp_opt.subflow_seq; 1215 mpext->dsn64 = mp_opt.dsn64; 1216 mpext->data_fin = mp_opt.data_fin; 1217 } 1218 mpext->data_len = mp_opt.data_len; 1219 mpext->use_map = 1; 1220 mpext->csum_reqd = mp_opt.csum_reqd; 1221 1222 if (mpext->csum_reqd) 1223 mpext->csum = mp_opt.csum; 1224 } 1225 1226 return true; 1227 } 1228 1229 static void mptcp_set_rwin(const struct tcp_sock *tp) 1230 { 1231 const struct sock *ssk = (const struct sock *)tp; 1232 const struct mptcp_subflow_context *subflow; 1233 struct mptcp_sock *msk; 1234 u64 ack_seq; 1235 1236 subflow = mptcp_subflow_ctx(ssk); 1237 msk = mptcp_sk(subflow->conn); 1238 1239 ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd; 1240 1241 if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent))) 1242 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); 1243 } 1244 1245 static u16 mptcp_make_csum(const struct mptcp_ext *mpext) 1246 { 1247 struct csum_pseudo_header header; 1248 __wsum csum; 1249 1250 /* cfr RFC 8684 3.3.1.: 1251 * the data sequence number used in the pseudo-header is 1252 * always the 64-bit value, irrespective of what length is used in the 1253 * DSS option itself. 1254 */ 1255 header.data_seq = cpu_to_be64(mpext->data_seq); 1256 header.subflow_seq = htonl(mpext->subflow_seq); 1257 header.data_len = htons(mpext->data_len); 1258 header.csum = 0; 1259 1260 csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum)); 1261 return (__force u16)csum_fold(csum); 1262 } 1263 1264 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, 1265 struct mptcp_out_options *opts) 1266 { 1267 if (unlikely(OPTION_MPTCP_FAIL & opts->suboptions)) { 1268 const struct sock *ssk = (const struct sock *)tp; 1269 struct mptcp_subflow_context *subflow; 1270 1271 subflow = mptcp_subflow_ctx(ssk); 1272 subflow->send_mp_fail = 0; 1273 1274 *ptr++ = mptcp_option(MPTCPOPT_MP_FAIL, 1275 TCPOLEN_MPTCP_FAIL, 1276 0, 0); 1277 put_unaligned_be64(opts->fail_seq, ptr); 1278 ptr += 2; 1279 } 1280 1281 /* RST is mutually exclusive with everything else */ 1282 if (unlikely(OPTION_MPTCP_RST & opts->suboptions)) { 1283 *ptr++ = mptcp_option(MPTCPOPT_RST, 1284 TCPOLEN_MPTCP_RST, 1285 opts->reset_transient, 1286 opts->reset_reason); 1287 return; 1288 } 1289 1290 /* DSS, MPC, MPJ and ADD_ADDR are mutually exclusive, see 1291 * mptcp_established_options*() 1292 */ 1293 if (likely(OPTION_MPTCP_DSS & opts->suboptions)) { 1294 struct mptcp_ext *mpext = &opts->ext_copy; 1295 u8 len = TCPOLEN_MPTCP_DSS_BASE; 1296 u8 flags = 0; 1297 1298 if (mpext->use_ack) { 1299 flags = MPTCP_DSS_HAS_ACK; 1300 if (mpext->ack64) { 1301 len += TCPOLEN_MPTCP_DSS_ACK64; 1302 flags |= MPTCP_DSS_ACK64; 1303 } else { 1304 len += TCPOLEN_MPTCP_DSS_ACK32; 1305 } 1306 } 1307 1308 if (mpext->use_map) { 1309 len += TCPOLEN_MPTCP_DSS_MAP64; 1310 1311 /* Use only 64-bit mapping flags for now, add 1312 * support for optional 32-bit mappings later. 1313 */ 1314 flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64; 1315 if (mpext->data_fin) 1316 flags |= MPTCP_DSS_DATA_FIN; 1317 1318 if (opts->csum_reqd) 1319 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 1320 } 1321 1322 *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags); 1323 1324 if (mpext->use_ack) { 1325 if (mpext->ack64) { 1326 put_unaligned_be64(mpext->data_ack, ptr); 1327 ptr += 2; 1328 } else { 1329 put_unaligned_be32(mpext->data_ack32, ptr); 1330 ptr += 1; 1331 } 1332 } 1333 1334 if (mpext->use_map) { 1335 put_unaligned_be64(mpext->data_seq, ptr); 1336 ptr += 2; 1337 put_unaligned_be32(mpext->subflow_seq, ptr); 1338 ptr += 1; 1339 if (opts->csum_reqd) { 1340 put_unaligned_be32(mpext->data_len << 16 | 1341 mptcp_make_csum(mpext), ptr); 1342 } else { 1343 put_unaligned_be32(mpext->data_len << 16 | 1344 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1345 } 1346 } 1347 } else if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | 1348 OPTION_MPTCP_MPC_ACK) & opts->suboptions) { 1349 u8 len, flag = MPTCP_CAP_HMAC_SHA256; 1350 1351 if (OPTION_MPTCP_MPC_SYN & opts->suboptions) { 1352 len = TCPOLEN_MPTCP_MPC_SYN; 1353 } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) { 1354 len = TCPOLEN_MPTCP_MPC_SYNACK; 1355 } else if (opts->ext_copy.data_len) { 1356 len = TCPOLEN_MPTCP_MPC_ACK_DATA; 1357 if (opts->csum_reqd) 1358 len += TCPOLEN_MPTCP_DSS_CHECKSUM; 1359 } else { 1360 len = TCPOLEN_MPTCP_MPC_ACK; 1361 } 1362 1363 if (opts->csum_reqd) 1364 flag |= MPTCP_CAP_CHECKSUM_REQD; 1365 1366 if (!opts->allow_join_id0) 1367 flag |= MPTCP_CAP_DENY_JOIN_ID0; 1368 1369 *ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len, 1370 MPTCP_SUPPORTED_VERSION, 1371 flag); 1372 1373 if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) & 1374 opts->suboptions)) 1375 goto mp_capable_done; 1376 1377 put_unaligned_be64(opts->sndr_key, ptr); 1378 ptr += 2; 1379 if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions)) 1380 goto mp_capable_done; 1381 1382 put_unaligned_be64(opts->rcvr_key, ptr); 1383 ptr += 2; 1384 if (!opts->ext_copy.data_len) 1385 goto mp_capable_done; 1386 1387 if (opts->csum_reqd) { 1388 put_unaligned_be32(opts->ext_copy.data_len << 16 | 1389 mptcp_make_csum(&opts->ext_copy), ptr); 1390 } else { 1391 put_unaligned_be32(opts->ext_copy.data_len << 16 | 1392 TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); 1393 } 1394 ptr += 1; 1395 1396 /* MPC is additionally mutually exclusive with MP_PRIO */ 1397 goto mp_capable_done; 1398 } else if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) { 1399 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1400 TCPOLEN_MPTCP_MPJ_SYN, 1401 opts->backup, opts->join_id); 1402 put_unaligned_be32(opts->token, ptr); 1403 ptr += 1; 1404 put_unaligned_be32(opts->nonce, ptr); 1405 ptr += 1; 1406 } else if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) { 1407 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1408 TCPOLEN_MPTCP_MPJ_SYNACK, 1409 opts->backup, opts->join_id); 1410 put_unaligned_be64(opts->thmac, ptr); 1411 ptr += 2; 1412 put_unaligned_be32(opts->nonce, ptr); 1413 ptr += 1; 1414 } else if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) { 1415 *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN, 1416 TCPOLEN_MPTCP_MPJ_ACK, 0, 0); 1417 memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN); 1418 ptr += 5; 1419 } else if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) { 1420 u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE; 1421 u8 echo = MPTCP_ADDR_ECHO; 1422 1423 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1424 if (opts->addr.family == AF_INET6) 1425 len = TCPOLEN_MPTCP_ADD_ADDR6_BASE; 1426 #endif 1427 1428 if (opts->addr.port) 1429 len += TCPOLEN_MPTCP_PORT_LEN; 1430 1431 if (opts->ahmac) { 1432 len += sizeof(opts->ahmac); 1433 echo = 0; 1434 } 1435 1436 *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, 1437 len, echo, opts->addr.id); 1438 if (opts->addr.family == AF_INET) { 1439 memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4); 1440 ptr += 1; 1441 } 1442 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 1443 else if (opts->addr.family == AF_INET6) { 1444 memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16); 1445 ptr += 4; 1446 } 1447 #endif 1448 1449 if (!opts->addr.port) { 1450 if (opts->ahmac) { 1451 put_unaligned_be64(opts->ahmac, ptr); 1452 ptr += 2; 1453 } 1454 } else { 1455 u16 port = ntohs(opts->addr.port); 1456 1457 if (opts->ahmac) { 1458 u8 *bptr = (u8 *)ptr; 1459 1460 put_unaligned_be16(port, bptr); 1461 bptr += 2; 1462 put_unaligned_be64(opts->ahmac, bptr); 1463 bptr += 8; 1464 put_unaligned_be16(TCPOPT_NOP << 8 | 1465 TCPOPT_NOP, bptr); 1466 1467 ptr += 3; 1468 } else { 1469 put_unaligned_be32(port << 16 | 1470 TCPOPT_NOP << 8 | 1471 TCPOPT_NOP, ptr); 1472 ptr += 1; 1473 } 1474 } 1475 } 1476 1477 if (OPTION_MPTCP_PRIO & opts->suboptions) { 1478 const struct sock *ssk = (const struct sock *)tp; 1479 struct mptcp_subflow_context *subflow; 1480 1481 subflow = mptcp_subflow_ctx(ssk); 1482 subflow->send_mp_prio = 0; 1483 1484 *ptr++ = mptcp_option(MPTCPOPT_MP_PRIO, 1485 TCPOLEN_MPTCP_PRIO, 1486 opts->backup, TCPOPT_NOP); 1487 } 1488 1489 mp_capable_done: 1490 if (OPTION_MPTCP_RM_ADDR & opts->suboptions) { 1491 u8 i = 1; 1492 1493 *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR, 1494 TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr, 1495 0, opts->rm_list.ids[0]); 1496 1497 while (i < opts->rm_list.nr) { 1498 u8 id1, id2, id3, id4; 1499 1500 id1 = opts->rm_list.ids[i]; 1501 id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP; 1502 id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP; 1503 id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP; 1504 put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr); 1505 ptr += 1; 1506 i += 4; 1507 } 1508 } 1509 1510 if (tp) 1511 mptcp_set_rwin(tp); 1512 } 1513 1514 __be32 mptcp_get_reset_option(const struct sk_buff *skb) 1515 { 1516 const struct mptcp_ext *ext = mptcp_get_ext(skb); 1517 u8 flags, reason; 1518 1519 if (ext) { 1520 flags = ext->reset_transient; 1521 reason = ext->reset_reason; 1522 1523 return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST, 1524 flags, reason); 1525 } 1526 1527 return htonl(0u); 1528 } 1529 EXPORT_SYMBOL_GPL(mptcp_get_reset_option); 1530