// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

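/* RFC 793 "SEGMENT ARRIVES" acceptability test: a segment [seq, end_seq)
 * is acceptable in the receive window [s_win, e_win) when it is a
 * zero-length segment sitting exactly at the left edge, when it overlaps
 * the window, or when it is a zero-length segment at the right edge.
 * For example, with s_win = 100 and e_win = 200, (seq = 90, end_seq = 110)
 * overlaps and is accepted, while (seq = 90, end_seq = 100) is not.
 */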
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting its FIN (and, probably, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to allow
 *   for the loss of one (or more) segments sent by the peer and of our
 *   ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates (e.g. based
 *   on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT
 * ARRIVES from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is
 * _not_ stateless. It means that, strictly speaking, we must spinlock
 * it. I do not want to! Well, the probability of misbehaviour is
 * ridiculously low and, it seems, we could use some mb() tricks to avoid
 * misreading sequence numbers, states etc.	--ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it can only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All the segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is not an
	 * old duplicate and we are not in danger of being killed by
	 * delayed old duplicates. The RFC check (that the SYN carries a
	 * newer sequence number) only works at rates < 40 Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more, we may
	 * even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to the time-wait state.
	 * It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		/* Pick an ISN beyond anything the old incarnation could
		 * have sent (cf. the RFC 1122 quote above); 0 is reserved
		 * to mean "no timewait ISN", hence the bump.
		 */
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
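/* The return value tells the af-specific caller how to dispose of the
 * segment: TCP_TW_SYN makes the caller look up a listener and, if one is
 * found, process the segment as a fresh SYN reusing the old four-tuple;
 * TCP_TW_ACK answers with an ACK from the timewait socket; TCP_TW_RST
 * sends a reset; TCP_TW_SUCCESS means the segment was fully consumed
 * here. See the do_time_wait handling in tcp_v4_rcv() and tcp_v6_rcv().
 */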
static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!tcp_alloc_md5sig_pool())
			goto out_free;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}
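/* The key copied above pins the tcp_md5_needed static branch; the matching
 * static_branch_slow_dec_deferred() is issued in tcp_twsk_destructor()
 * below, once the timewait socket dies.
 */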
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);	/* 3.5 * RTO */

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section, otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key) {
			kfree_rcu(twsk->tw_md5_key, rcu);
			static_branch_slow_dec_deferred(&tcp_md5_needed);
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			/* The last refcount is decremented in tcp_sk_exit_batch() */
			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
				continue;

			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);
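/* A netns gets its own purge walk only when it uses a private ehash (see
 * the net.ipv4.tcp_child_ehash_entries sysctl); with the shared global
 * hash, a single inet_twsk_purge() pass covers every netns on the exit
 * list, which is what purged_once short-circuits.
 */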
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
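/* A dst-supplied congestion control (RTAX_CC_ALGO) typically comes from a
 * route metric, e.g. "ip route add 10.0.0.0/8 via ... congctl dctcp";
 * tcp_ca_dst_locked() tells whether the route locked that choice.
 */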
static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
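/* Note on the timestamp branch above: once timestamps are negotiated,
 * every segment carries the 12-byte aligned option, so tcp_header_len
 * grows from 20 to 32 bytes (sizeof(struct tcphdr) +
 * TCPOLEN_TSTAMP_ALIGNED) and the usable MSS shrinks by those 12 bytes.
 */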
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for
 * ack validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING. To be more exact,
		 * it says that we should send an ACK, because this
		 * segment (at least, if it has no data) is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description is
		 * wrong; we cannot believe it and should rely only on
		 * common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet,
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK,
		 * similar to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
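	/* The SYNACK retransmit above is throttled by
	 * tcp_oow_rate_limited(), i.e. by the
	 * net.ipv4.tcp_invalid_ratelimit sysctl, so a stream of duplicate
	 * SYNs cannot force a SYNACK storm.
	 */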
	/* Further reproduces the section "SEGMENT ARRIVES"
	 * for the SYN-RECEIVED state of RFC793.
	 * It is broken, however; it fails only when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * The malicious sender sends identical SYNs (and thus identical
	 * sequence numbers) to both A and B:
	 *
	 *	A: gets SYN, seq=7
	 *	B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 *	A: sends SYN|ACK, seq=7, ack_seq=8
	 *	B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK; the ACK test passes. So
	 * does the sequence test; the SYN is truncated, and thus we
	 * consider it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	 * this bare ACK. Otherwise, we create an established connection.
	 * Both ends (listening sockets) accept the new incoming
	 * connection and try to talk to each other. 8-)
	 *
	 * Note: This case is both harmless and rare. The possibility is
	 * about the same as us discovering intelligent life on another
	 * planet tomorrow.
	 *
	 * But generally, we should (the RFC lies!) accept an ACK from a
	 * SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not; hence, we do not either.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating the protocol. All the checks must be made
	 * before we attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
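	/* PAWS recap: tcp_paws_reject() fails a segment whose TSval is
	 * older than the ts_recent we derived above (beyond the
	 * TCP_PAWS_WINDOW fudge), unless ts_recent itself is more than
	 * TCP_PAWS_24DAYS stale; see tcp_paws_check() in net/tcp.h.
	 */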
	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure the ACK bit
	 * is set. If ACK is not set, silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
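/* Note on ownership: own_req is true only when this CPU created and
 * hashed the child itself; if another CPU got there first, we report it
 * via *req_stolen so the caller (see tcp_v4_rcv()) retries the socket
 * lookup instead of touching req again.
 */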
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);