// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
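
/* For example, with s_win = 100 and e_win = 600 the acceptable window
 * is the half-open interval [100, 600): a zero-length segment at
 * seq == 100 passes the first test, a segment covering [90, 110)
 * overlaps the window and passes the second, and a zero-length probe
 * at exactly seq == 600 is accepted only by the final right-edge check.
 */
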
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
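
	/* The timestamp comparison used below relies on serial number
	 * arithmetic: for u32 values a and b, (s32)(a - b) < 0 means
	 * "b is newer than a" even across wraparound. For example, with
	 * a = 0xfffffff0 and b = 0x10, a - b = 0xffffffe0, which is
	 * negative as an s32, so b is correctly seen as more recent.
	 */
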
	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it if it is not an
	 * old duplicate and we are not in danger of being killed by
	 * delayed old duplicates. The RFC check (that it carries a newer
	 * sequence number) works at rates < 40Mbit/sec. However, if PAWS
	 * works, it is reliable AND, even more, we may relax the silly
	 * seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to time-wait state. It
	 * is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		/* Pick an ISN above the old connection's sequence space;
		 * zero is reserved to mean "no time-wait ISN".
		 */
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
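
/* Callers such as tcp_v4_rcv() dispatch on the result of
 * tcp_timewait_state_process(): TCP_TW_SYN re-runs the listener lookup
 * so the connection may be reopened, TCP_TW_ACK sends a duplicate ACK,
 * TCP_TW_RST sends a reset, and TCP_TW_SUCCESS means the segment was
 * consumed and the skb can simply be dropped.
 */
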
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);	/* = 3.5 * RTO */
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		tcptw->tw_md5_key = NULL;
		if (static_branch_unlikely(&tcp_md5_needed)) {
			struct tcp_md5sig_key *key;

			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		}
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		/* The last refcount is decremented in tcp_sk_exit_batch() */
		if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
			continue;

		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;
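
	/* When timestamps were negotiated, every segment carries
	 * TCPOLEN_TSTAMP_ALIGNED (12) bytes of TCP options, so the
	 * effective MSS fed to window selection below is reduced
	 * accordingly, e.g. 1460 - 12 = 1448 for a standard Ethernet MSS.
	 */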
	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
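
/* The RTAX_CC_ALGO metric consulted above is typically installed per
 * route from userspace, e.g. "ip route add 10.0.0.0/8 via 192.0.2.1
 * congctl reno" (iproute2), with "congctl lock reno" additionally
 * setting the lock bit that tcp_ca_dst_locked() reports.
 */
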
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
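
	/* The window field of the handshake-completing ACK is already
	 * subject to scaling (only SYN segments carry unscaled windows),
	 * hence the shift below: e.g. a raw window of 255 with
	 * snd_wscale = 7 yields snd_wnd = 255 << 7 = 32640.
	 */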
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot trust it and should rely only
		 * on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK,
		 * similar to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
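
	/* reqsk_timeout() above is the SYNACK retransmit timeout with
	 * exponential backoff applied: roughly the request's base timeout
	 * doubled per retransmission (num_timeout) and clamped to
	 * TCP_RTO_MAX, so a 1 s base after three retransmissions gives
	 * about 8 s.
	 */
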
	/* Further reproduces the section "SEGMENT ARRIVES"
	 * for the SYN-RECEIVED state of RFC793.
	 * It is broken, however: it fails to work only
	 * when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party.  We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid.  Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * The malicious sender sends identical SYNs (and thus identical
	 * sequence numbers) to both A and B:
	 *
	 *	A: gets SYN, seq=7
	 *	B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 *	A: sends SYN|ACK, seq=7, ack_seq=8
	 *	B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, and the ACK test passes.
	 * So does the sequence test, the SYN is truncated, and thus we
	 * consider it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	 * this bare ACK.  Otherwise, we create an established connection.
	 * Both ends (listening sockets) accept the new incoming
	 * connection and try to talk to each other. 8-)
	 *
	 * Note: This case is both harmless and rare.  The probability is
	 * about the same as us discovering intelligent life on another
	 * planet tomorrow.
	 *
	 * But generally, we should (the RFC lies!) accept an ACK of the
	 * SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not, hence we do not either.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating the protocol. All the checks must be made
	 * before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK is not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;
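
	/* The rskq_defer_accept threshold tested below comes from the
	 * TCP_DEFER_ACCEPT socket option, e.g.:
	 *
	 *	int secs = 5;
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
	 *		   &secs, sizeof(secs));
	 *
	 * where the seconds value is converted to an equivalent number
	 * of SYNACK retransmission periods.
	 */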
	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming
		 * at resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
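
/* A minimal sketch of how tcp_check_req() and tcp_child_process()
 * cooperate in a receive path, loosely following tcp_v4_rcv() (error,
 * refcount and req_stolen handling omitted):
 *
 *	struct sock *nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
 *
 *	if (!nsk)
 *		goto discard;			// consumed or dropped
 *	if (nsk != sk && tcp_child_process(sk, nsk, skb))
 *		tcp_v4_send_reset(nsk, skb);	// child refused the segment
 */
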
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);