/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo	= &tcp_hashinfo,
};
EXPORT_SYMBOL_GPL(tcp_death_row);

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
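
/* A worked example of tcp_in_window() above (numbers are illustrative
 * only): with rcv_nxt = 100 and rcv_wnd = 1000 the window is [100, 1100).
 * A bare ACK with seq == end_seq == 100 is acceptable (seq == s_win),
 * a segment spanning [90, 150) overlaps the window and is acceptable,
 * while [50, 90) fails every clause; in TIME-WAIT such a failure feeds
 * the rate-limited ACK path of tcp_timewait_check_oow_rate_limit().
 */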

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout enough to
 *   allow the loss of one (or more) segments sent by the peer and of
 *   our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other
 *   end finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that strictly speaking we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, tw->tw_timeout);
		else
			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now the real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] in TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment, it may be only a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
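			/* RFC 1337 ("TIME-WAIT Assassination Hazards in TCP")
			 * recommends ignoring RSTs while in TIME-WAIT. With
			 * the (off by default) tcp_rfc1337 sysctl set, the
			 * kill below is skipped: the bucket is kept and its
			 * timer simply rearmed.
			 */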
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check (that it carries a
	 * newer sequence number) only works at rates < 40 Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement here: if this SYN
	 * turns out to be an old duplicate (i.e. we receive an RST in
	 * reply to our SYN-ACK), we must return the socket to time-wait
	 * state. It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
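
/* A rough sketch of how a caller such as tcp_v4_rcv() dispatches on the
 * status returned above (an illustration, not the exact upstream code):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	hand the SYN to a fresh listener lookup;
 *	case TCP_TW_ACK:	tcp_v4_timewait_ack(sk, skb); break;
 *	case TCP_TW_RST:	send a reset; break;
 *	case TCP_TW_SUCCESS:	the segment was consumed; do nothing;
 *	}
 */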

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, &tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);	/* 3.5 * RTO */
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, timeo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
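
/* Worked numbers for the timeout selection above (illustrative only):
 * with icsk_rto == 200ms, rto == 4 * 200ms - 100ms == 700ms, i.e.
 * 3.5 RTOs. Without recycling, a socket entering TCP_TIME_WAIT always
 * sleeps the full TCP_TIMEWAIT_LEN (60 seconds); with tw_recycle and a
 * usable peer timestamp the bucket may be reclaimed after ~3.5 RTOs.
 */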

void tcp_openreq_init_rwin(struct request_sock *req,
			   struct sock *sk, struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;
	int mss = dst_metric_advmss(dst);

	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
		mss = tp->rx_opt.user_mss;

	/* Set this up on the first call only */
	req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
		req->window_clamp = tcp_full_space(sk);

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(tcp_full_space(sk),
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rcv_wnd,
		&req->window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
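
/* The RTAX_CC_ALGO metric consulted above allows pinning a congestion
 * control algorithm per route from user space, e.g. with a reasonably
 * recent iproute2 (illustration only):
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 congctl reno
 *
 * Passive connections covered by such a route then start with the given
 * algorithm instead of the system default.
 */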

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 0;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		tcp_init_xmit_timers(newsk);
		__skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;

		newtp->saved_syn = req->saved_syn;
		req->saved_syn = NULL;

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
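
/* Window scaling example for the snd_wnd setup above (numbers are
 * illustrative only): if the peer's segment carried window == 512 and
 * a snd_wscale of 7 was negotiated, the initial send window is
 * 512 << 7 == 65536 bytes. Without wscale_ok both scale factors are
 * forced to zero and window_clamp is capped at 65535.
 */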

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for
 * ack validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true timestamp, but that is not
			 * required: it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
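
	/* Timer arithmetic for the SYNACK retransmission above (numbers
	 * are illustrative only): with TCP_TIMEOUT_INIT == 1s and
	 * num_timeout == 2, the request timer is pushed out to
	 * jiffies + min(1s << 2, TCP_RTO_MAX), i.e. four seconds from
	 * now, preserving the usual exponential backoff.
	 */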

	/* Further reproduces section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 * It is broken, however, it does not work only
	 * when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * Malicious sender sends identical SYNs (and thus identical sequence
	 * numbers) to both A and B:
	 *
	 * A: gets SYN, seq=7
	 * B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 * A: sends SYN|ACK, seq=7, ack_seq=8
	 * B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, the ACK test passes. So
	 * does the sequence test, the SYN is truncated, and thus we
	 * consider it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	 * this bare ACK. Otherwise, we create an established connection.
	 * Both ends (listening sockets) accept the new incoming connection
	 * and try to talk to each other. 8-)
	 *
	 * Note: This case is both harmless, and rare. The possibility is
	 * about the same as us discovering intelligent life on another
	 * planet tomorrow.
	 *
	 * But generally, we should (RFC lies!) accept an ACK
	 * from a SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not, hence, we do not too.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating protocol. All the checks must be made
	 * before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (!child)
		goto listen_overflow;

	inet_csk_reqsk_queue_drop(sk, req);
	inet_csk_reqsk_queue_add(sk, req, child);
	/* Warning: caller must not call reqsk_put(req);
	 * child stole the last reference on it.
	 */
	return child;
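
	/* The branch below is policy: if syn_recv_sock() failed because
	 * the accept queue is full and tcp_abort_on_overflow is left at
	 * its default of 0, the final ACK is quietly ignored, so the
	 * client retransmits it and may get through later; setting the
	 * sysctl to 1 answers with a reset instead.
	 */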

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
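
/* Putting the pieces together, the passive-open path looks roughly like
 * this (a sketch, not verbatim from the af-specific input code):
 *
 *	SYN -> listener:  a request_sock is minted and a SYNACK sent;
 *	ACK -> tcp_check_req():  validates the handshake and calls
 *		syn_recv_sock(), i.e. tcp_create_openreq_child();
 *	then tcp_child_process() runs tcp_rcv_state_process() on the
 *	child and wakes the listener so accept() can return it.
 */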