1 /*- 2 * Copyright (c) 2016-2018 3 * Netflix Inc. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 /* 29 * Author: Randall Stewart <rrs@netflix.com> 30 * This work is based on the ACM Queue paper 31 * BBR - Congestion Based Congestion Control 32 * and also numerous discussions with Neal, Yuchung and Van. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_inet.h" 39 #include "opt_inet6.h" 40 #include "opt_ipsec.h" 41 #include "opt_tcpdebug.h" 42 #include "opt_ratelimit.h" 43 /*#include "opt_kern_tls.h"*/ 44 #include <sys/param.h> 45 #include <sys/module.h> 46 #include <sys/kernel.h> 47 #ifdef TCP_HHOOK 48 #include <sys/hhook.h> 49 #endif 50 #include <sys/malloc.h> 51 #include <sys/mbuf.h> 52 #include <sys/proc.h> 53 #include <sys/socket.h> 54 #include <sys/socketvar.h> 55 #ifdef KERN_TLS 56 #include <sys/sockbuf_tls.h> 57 #endif 58 #include <sys/sysctl.h> 59 #include <sys/systm.h> 60 #include <sys/tree.h> 61 #include <sys/refcount.h> 62 #include <sys/queue.h> 63 #include <sys/smp.h> 64 #include <sys/kthread.h> 65 #include <sys/lock.h> 66 #include <sys/mutex.h> 67 #include <sys/time.h> 68 #include <vm/uma.h> 69 #include <sys/kern_prefetch.h> 70 71 #include <net/route.h> 72 #include <net/vnet.h> 73 #include <net/ethernet.h> 74 #include <net/bpf.h> 75 76 #define TCPSTATES /* for logging */ 77 78 #include <netinet/in.h> 79 #include <netinet/in_kdtrace.h> 80 #include <netinet/in_pcb.h> 81 #include <netinet/ip.h> 82 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 83 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 84 #include <netinet/ip_var.h> 85 #include <netinet/ip6.h> 86 #include <netinet6/in6_pcb.h> 87 #include <netinet6/ip6_var.h> 88 #include <netinet/tcp.h> 89 #include <netinet/tcp_fsm.h> 90 #include <netinet/tcp_seq.h> 91 #include <netinet/tcp_timer.h> 92 #include <netinet/tcp_var.h> 93 #include <netinet/tcpip.h> 94 #include <netinet/tcp_hpts.h> 95 #include <netinet/cc/cc.h> 96 #include <netinet/tcp_log_buf.h> 97 #ifdef TCPDEBUG 98 #include <netinet/tcp_debug.h> 99 #endif /* TCPDEBUG */ 100 #ifdef TCP_OFFLOAD 101 #include <netinet/tcp_offload.h> 102 #endif 103 #ifdef INET6 104 #include <netinet6/tcp6_var.h> 105 #endif 106 #include <netinet/tcp_fastopen.h> 107 108 #include <netipsec/ipsec_support.h> 109 #include <net/if.h> 110 #include <net/if_var.h> 111 112 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 113 #include <netipsec/ipsec.h> 114 #include <netipsec/ipsec6.h> 115 #endif /* IPSEC */ 116 117 #include <netinet/udp.h> 118 #include <netinet/udp_var.h> 119 #include <machine/in_cksum.h> 120 121 #ifdef MAC 122 #include <security/mac/mac_framework.h> 123 #endif 124 #include "rack_bbr_common.h" 125 126 /* 127 * Common TCP Functions - These are shared by borth 128 * rack and BBR. 129 */ 130 131 132 #ifdef KERN_TLS 133 uint32_t 134 ctf_get_opt_tls_size(struct socket *so, uint32_t rwnd) 135 { 136 struct sbtls_info *tls; 137 uint32_t len; 138 139 again: 140 tls = so->so_snd.sb_tls_info; 141 len = tls->sb_params.sb_maxlen; /* max tls payload */ 142 len += tls->sb_params.sb_tls_hlen; /* tls header len */ 143 len += tls->sb_params.sb_tls_tlen; /* tls trailer len */ 144 if ((len * 4) > rwnd) { 145 /* 146 * Stroke this will suck counter and what 147 * else should we do Drew? From the 148 * TCP perspective I am not sure 149 * what should be done... 150 */ 151 if (tls->sb_params.sb_maxlen > 4096) { 152 tls->sb_params.sb_maxlen -= 4096; 153 if (tls->sb_params.sb_maxlen < 4096) 154 tls->sb_params.sb_maxlen = 4096; 155 goto again; 156 } 157 } 158 return (len); 159 } 160 #endif 161 162 int 163 ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int has_pkt) 164 { 165 /* 166 * We are passed a raw change of mbuf packets 167 * that arrived in LRO. They are linked via 168 * the m_nextpkt link in the pkt-headers. 169 * 170 * We process each one by: 171 * a) saving off the next 172 * b) stripping off the ether-header 173 * c) formulating the arguments for 174 * the tfb_tcp_hpts_do_segment 175 * d) calling each mbuf to tfb_tcp_hpts_do_segment 176 * after adjusting the time to match the arrival time. 177 * Note that the LRO code assures no IP options are present. 178 * 179 * The symantics for calling tfb_tcp_hpts_do_segment are the 180 * following: 181 * 1) It returns 0 if all went well and you (the caller) need 182 * to release the lock. 183 * 2) If nxt_pkt is set, then the function will surpress calls 184 * to tfb_tcp_output() since you are promising to call again 185 * with another packet. 186 * 3) If it returns 1, then you must free all the packets being 187 * shipped in, the tcb has been destroyed (or about to be destroyed). 188 */ 189 struct mbuf *m_save; 190 struct ether_header *eh; 191 struct epoch_tracker et; 192 struct tcphdr *th; 193 #ifdef INET6 194 struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */ 195 #endif 196 #ifdef INET 197 struct ip *ip = NULL; /* Keep compiler happy. */ 198 #endif 199 struct ifnet *ifp; 200 struct timeval tv; 201 int32_t retval, nxt_pkt, tlen, off; 202 uint16_t etype; 203 uint16_t drop_hdrlen; 204 uint8_t iptos, no_vn=0, bpf_req=0; 205 206 /* 207 * This is a bit deceptive, we get the 208 * "info epoch" which is really the network 209 * epoch. This covers us on both any INP 210 * type change but also if the ifp goes 211 * away it covers us as well. 212 */ 213 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 214 if (m && m->m_pkthdr.rcvif) 215 ifp = m->m_pkthdr.rcvif; 216 else 217 ifp = NULL; 218 if (ifp) { 219 bpf_req = bpf_peers_present(ifp->if_bpf); 220 } else { 221 /* 222 * We probably should not work around 223 * but kassert, since lro alwasy sets rcvif. 224 */ 225 no_vn = 1; 226 goto skip_vnet; 227 } 228 CURVNET_SET(ifp->if_vnet); 229 skip_vnet: 230 while (m) { 231 m_save = m->m_nextpkt; 232 m->m_nextpkt = NULL; 233 /* Now lets get the ether header */ 234 eh = mtod(m, struct ether_header *); 235 etype = ntohs(eh->ether_type); 236 /* Let the BPF see the packet */ 237 if (bpf_req && ifp) 238 ETHER_BPF_MTAP(ifp, m); 239 m_adj(m, sizeof(*eh)); 240 /* Trim off the ethernet header */ 241 switch (etype) { 242 #ifdef INET6 243 case ETHERTYPE_IPV6: 244 { 245 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) { 246 m = m_pullup(m, sizeof(*ip6) + sizeof(*th)); 247 if (m == NULL) { 248 TCPSTAT_INC(tcps_rcvshort); 249 m_freem(m); 250 goto skipped_pkt; 251 } 252 } 253 ip6 = (struct ip6_hdr *)(eh + 1); 254 th = (struct tcphdr *)(ip6 + 1); 255 tlen = ntohs(ip6->ip6_plen); 256 drop_hdrlen = sizeof(*ip6); 257 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) { 258 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 259 th->th_sum = m->m_pkthdr.csum_data; 260 else 261 th->th_sum = in6_cksum_pseudo(ip6, tlen, 262 IPPROTO_TCP, m->m_pkthdr.csum_data); 263 th->th_sum ^= 0xffff; 264 } else 265 th->th_sum = in6_cksum(m, IPPROTO_TCP, drop_hdrlen, tlen); 266 if (th->th_sum) { 267 TCPSTAT_INC(tcps_rcvbadsum); 268 m_freem(m); 269 goto skipped_pkt; 270 } 271 /* 272 * Be proactive about unspecified IPv6 address in source. 273 * As we use all-zero to indicate unbounded/unconnected pcb, 274 * unspecified IPv6 address can be used to confuse us. 275 * 276 * Note that packets with unspecified IPv6 destination is 277 * already dropped in ip6_input. 278 */ 279 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 280 /* XXX stat */ 281 m_freem(m); 282 goto skipped_pkt; 283 } 284 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 285 break; 286 } 287 #endif 288 #ifdef INET 289 case ETHERTYPE_IP: 290 { 291 if (m->m_len < sizeof (struct tcpiphdr)) { 292 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 293 == NULL) { 294 TCPSTAT_INC(tcps_rcvshort); 295 m_freem(m); 296 goto skipped_pkt; 297 } 298 } 299 ip = (struct ip *)(eh + 1); 300 th = (struct tcphdr *)(ip + 1); 301 drop_hdrlen = sizeof(*ip); 302 iptos = ip->ip_tos; 303 tlen = ntohs(ip->ip_len) - sizeof(struct ip); 304 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 305 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 306 th->th_sum = m->m_pkthdr.csum_data; 307 else 308 th->th_sum = in_pseudo(ip->ip_src.s_addr, 309 ip->ip_dst.s_addr, 310 htonl(m->m_pkthdr.csum_data + tlen + 311 IPPROTO_TCP)); 312 th->th_sum ^= 0xffff; 313 } else { 314 int len; 315 struct ipovly *ipov = (struct ipovly *)ip; 316 /* 317 * Checksum extended TCP header and data. 318 */ 319 len = drop_hdrlen + tlen; 320 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 321 ipov->ih_len = htons(tlen); 322 th->th_sum = in_cksum(m, len); 323 /* Reset length for SDT probes. */ 324 ip->ip_len = htons(len); 325 /* Reset TOS bits */ 326 ip->ip_tos = iptos; 327 /* Re-initialization for later version check */ 328 ip->ip_v = IPVERSION; 329 ip->ip_hl = sizeof(*ip) >> 2; 330 } 331 if (th->th_sum) { 332 TCPSTAT_INC(tcps_rcvbadsum); 333 m_freem(m); 334 goto skipped_pkt; 335 } 336 break; 337 } 338 #endif 339 } 340 /* 341 * Convert TCP protocol specific fields to host format. 342 */ 343 tcp_fields_to_host(th); 344 345 off = th->th_off << 2; 346 if (off < sizeof (struct tcphdr) || off > tlen) { 347 TCPSTAT_INC(tcps_rcvbadoff); 348 m_freem(m); 349 goto skipped_pkt; 350 } 351 tlen -= off; 352 drop_hdrlen += off; 353 /* 354 * Now lets setup the timeval to be when we should 355 * have been called (if we can). 356 */ 357 m->m_pkthdr.lro_nsegs = 1; 358 if (m->m_flags & M_TSTMP_LRO) { 359 tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000; 360 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000; 361 } else { 362 /* Should not be should we kassert instead? */ 363 tcp_get_usecs(&tv); 364 } 365 /* Now what about next packet? */ 366 if (m_save || has_pkt) 367 nxt_pkt = 1; 368 else 369 nxt_pkt = 0; 370 retval = (*tp->t_fb->tfb_do_segment_nounlock)(m, th, so, tp, drop_hdrlen, tlen, 371 iptos, nxt_pkt, &tv); 372 if (retval) { 373 /* We lost the lock and tcb probably */ 374 m = m_save; 375 while (m) { 376 m_save = m->m_nextpkt; 377 m->m_nextpkt = NULL; 378 m_freem(m); 379 m = m_save; 380 } 381 if (no_vn == 0) 382 CURVNET_RESTORE(); 383 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 384 return (retval); 385 } 386 skipped_pkt: 387 m = m_save; 388 } 389 if (no_vn == 0) 390 CURVNET_RESTORE(); 391 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 392 return (retval); 393 } 394 395 int 396 ctf_do_queued_segments(struct socket *so, struct tcpcb *tp, int have_pkt) 397 { 398 struct mbuf *m; 399 400 /* First lets see if we have old packets */ 401 if (tp->t_in_pkt) { 402 m = tp->t_in_pkt; 403 tp->t_in_pkt = NULL; 404 tp->t_tail_pkt = NULL; 405 if (ctf_process_inbound_raw(tp, so, m, have_pkt)) { 406 /* We lost the tcpcb (maybe a RST came in)? */ 407 return (1); 408 } 409 } 410 return (0); 411 } 412 413 uint32_t 414 ctf_outstanding(struct tcpcb *tp) 415 { 416 return (tp->snd_max - tp->snd_una); 417 } 418 419 uint32_t 420 ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked) 421 { 422 if (rc_sacked <= ctf_outstanding(tp)) 423 return (ctf_outstanding(tp) - rc_sacked); 424 else { 425 /* TSNH */ 426 #ifdef INVARIANTS 427 panic("tp:%p rc_sacked:%d > out:%d", 428 tp, rc_sacked, ctf_outstanding(tp)); 429 #endif 430 return (0); 431 } 432 } 433 434 void 435 ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, 436 int32_t rstreason, int32_t tlen) 437 { 438 if (tp != NULL) { 439 tcp_dropwithreset(m, th, tp, tlen, rstreason); 440 INP_WUNLOCK(tp->t_inpcb); 441 } else 442 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 443 } 444 445 /* 446 * ctf_drop_checks returns 1 for you should not proceed. It places 447 * in ret_val what should be returned 1/0 by the caller. The 1 indicates 448 * that the TCB is unlocked and probably dropped. The 0 indicates the 449 * TCB is still valid and locked. 450 */ 451 int 452 ctf_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val) 453 { 454 int32_t todrop; 455 int32_t thflags; 456 int32_t tlen; 457 458 thflags = *thf; 459 tlen = *tlenp; 460 todrop = tp->rcv_nxt - th->th_seq; 461 if (todrop > 0) { 462 if (thflags & TH_SYN) { 463 thflags &= ~TH_SYN; 464 th->th_seq++; 465 if (th->th_urp > 1) 466 th->th_urp--; 467 else 468 thflags &= ~TH_URG; 469 todrop--; 470 } 471 /* 472 * Following if statement from Stevens, vol. 2, p. 960. 473 */ 474 if (todrop > tlen 475 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 476 /* 477 * Any valid FIN must be to the left of the window. 478 * At this point the FIN must be a duplicate or out 479 * of sequence; drop it. 480 */ 481 thflags &= ~TH_FIN; 482 /* 483 * Send an ACK to resynchronize and drop any data. 484 * But keep on processing for RST or ACK. 485 */ 486 tp->t_flags |= TF_ACKNOW; 487 todrop = tlen; 488 TCPSTAT_INC(tcps_rcvduppack); 489 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 490 } else { 491 TCPSTAT_INC(tcps_rcvpartduppack); 492 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 493 } 494 /* 495 * DSACK - add SACK block for dropped range 496 */ 497 if (tp->t_flags & TF_SACK_PERMIT) { 498 tcp_update_sack_list(tp, th->th_seq, th->th_seq + tlen); 499 /* 500 * ACK now, as the next in-sequence segment 501 * will clear the DSACK block again 502 */ 503 tp->t_flags |= TF_ACKNOW; 504 } 505 *drop_hdrlen += todrop; /* drop from the top afterwards */ 506 th->th_seq += todrop; 507 tlen -= todrop; 508 if (th->th_urp > todrop) 509 th->th_urp -= todrop; 510 else { 511 thflags &= ~TH_URG; 512 th->th_urp = 0; 513 } 514 } 515 /* 516 * If segment ends after window, drop trailing data (and PUSH and 517 * FIN); if nothing left, just ACK. 518 */ 519 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 520 if (todrop > 0) { 521 TCPSTAT_INC(tcps_rcvpackafterwin); 522 if (todrop >= tlen) { 523 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 524 /* 525 * If window is closed can only take segments at 526 * window edge, and have to drop data and PUSH from 527 * incoming segments. Continue processing, but 528 * remember to ack. Otherwise, drop segment and 529 * ack. 530 */ 531 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 532 tp->t_flags |= TF_ACKNOW; 533 TCPSTAT_INC(tcps_rcvwinprobe); 534 } else { 535 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 536 return (1); 537 } 538 } else 539 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 540 m_adj(m, -todrop); 541 tlen -= todrop; 542 thflags &= ~(TH_PUSH | TH_FIN); 543 } 544 *thf = thflags; 545 *tlenp = tlen; 546 return (0); 547 } 548 549 /* 550 * The value in ret_val informs the caller 551 * if we dropped the tcb (and lock) or not. 552 * 1 = we dropped it, 0 = the TCB is still locked 553 * and valid. 554 */ 555 void 556 ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val) 557 { 558 /* 559 * Generate an ACK dropping incoming segment if it occupies sequence 560 * space, where the ACK reflects our state. 561 * 562 * We can now skip the test for the RST flag since all paths to this 563 * code happen after packets containing RST have been dropped. 564 * 565 * In the SYN-RECEIVED state, don't send an ACK unless the segment 566 * we received passes the SYN-RECEIVED ACK test. If it fails send a 567 * RST. This breaks the loop in the "LAND" DoS attack, and also 568 * prevents an ACK storm between two listening ports that have been 569 * sent forged SYN segments, each with the source address of the 570 * other. 571 */ 572 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 573 (SEQ_GT(tp->snd_una, th->th_ack) || 574 SEQ_GT(th->th_ack, tp->snd_max))) { 575 *ret_val = 1; 576 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 577 return; 578 } else 579 *ret_val = 0; 580 tp->t_flags |= TF_ACKNOW; 581 if (m) 582 m_freem(m); 583 } 584 585 void 586 ctf_do_drop(struct mbuf *m, struct tcpcb *tp) 587 { 588 589 /* 590 * Drop space held by incoming segment and return. 591 */ 592 if (tp != NULL) 593 INP_WUNLOCK(tp->t_inpcb); 594 if (m) 595 m_freem(m); 596 } 597 598 int 599 ctf_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp) 600 { 601 /* 602 * RFC5961 Section 3.2 603 * 604 * - RST drops connection only if SEG.SEQ == RCV.NXT. - If RST is in 605 * window, we send challenge ACK. 606 * 607 * Note: to take into account delayed ACKs, we should test against 608 * last_ack_sent instead of rcv_nxt. Note 2: we handle special case 609 * of closed window, not covered by the RFC. 610 */ 611 int dropped = 0; 612 613 if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) && 614 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 615 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 616 617 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 618 KASSERT(tp->t_state != TCPS_SYN_SENT, 619 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 620 __func__, th, tp)); 621 622 if (V_tcp_insecure_rst || 623 (tp->last_ack_sent == th->th_seq) || 624 (tp->rcv_nxt == th->th_seq) || 625 ((tp->last_ack_sent - 1) == th->th_seq)) { 626 TCPSTAT_INC(tcps_drops); 627 /* Drop the connection. */ 628 switch (tp->t_state) { 629 case TCPS_SYN_RECEIVED: 630 so->so_error = ECONNREFUSED; 631 goto close; 632 case TCPS_ESTABLISHED: 633 case TCPS_FIN_WAIT_1: 634 case TCPS_FIN_WAIT_2: 635 case TCPS_CLOSE_WAIT: 636 case TCPS_CLOSING: 637 case TCPS_LAST_ACK: 638 so->so_error = ECONNRESET; 639 close: 640 tcp_state_change(tp, TCPS_CLOSED); 641 /* FALLTHROUGH */ 642 default: 643 tp = tcp_close(tp); 644 } 645 dropped = 1; 646 ctf_do_drop(m, tp); 647 } else { 648 TCPSTAT_INC(tcps_badrst); 649 /* Send challenge ACK. */ 650 tcp_respond(tp, mtod(m, void *), th, m, 651 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 652 tp->last_ack_sent = tp->rcv_nxt; 653 } 654 } else { 655 m_freem(m); 656 } 657 return (dropped); 658 } 659 660 /* 661 * The value in ret_val informs the caller 662 * if we dropped the tcb (and lock) or not. 663 * 1 = we dropped it, 0 = the TCB is still locked 664 * and valid. 665 */ 666 void 667 ctf_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ret_val) 668 { 669 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 670 671 TCPSTAT_INC(tcps_badsyn); 672 if (V_tcp_insecure_syn && 673 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 674 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 675 tp = tcp_drop(tp, ECONNRESET); 676 *ret_val = 1; 677 ctf_do_drop(m, tp); 678 } else { 679 /* Send challenge ACK. */ 680 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 681 tp->snd_nxt, TH_ACK); 682 tp->last_ack_sent = tp->rcv_nxt; 683 m = NULL; 684 *ret_val = 0; 685 ctf_do_drop(m, NULL); 686 } 687 } 688 689 /* 690 * bbr_ts_check returns 1 for you should not proceed, the state 691 * machine should return. It places in ret_val what should 692 * be returned 1/0 by the caller (hpts_do_segment). The 1 indicates 693 * that the TCB is unlocked and probably dropped. The 0 indicates the 694 * TCB is still valid and locked. 695 */ 696 int 697 ctf_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 698 int32_t tlen, int32_t thflags, int32_t * ret_val) 699 { 700 701 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 702 /* 703 * Invalidate ts_recent. If this segment updates ts_recent, 704 * the age will be reset later and ts_recent will get a 705 * valid value. If it does not, setting ts_recent to zero 706 * will at least satisfy the requirement that zero be placed 707 * in the timestamp echo reply when ts_recent isn't valid. 708 * The age isn't reset until we get a valid ts_recent 709 * because we don't want out-of-order segments to be dropped 710 * when ts_recent is old. 711 */ 712 tp->ts_recent = 0; 713 } else { 714 TCPSTAT_INC(tcps_rcvduppack); 715 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 716 TCPSTAT_INC(tcps_pawsdrop); 717 *ret_val = 0; 718 if (tlen) { 719 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 720 } else { 721 ctf_do_drop(m, NULL); 722 } 723 return (1); 724 } 725 return (0); 726 } 727 728 void 729 ctf_calc_rwin(struct socket *so, struct tcpcb *tp) 730 { 731 int32_t win; 732 733 /* 734 * Calculate amount of space in receive window, and then do TCP 735 * input processing. Receive window is amount of space in rcv queue, 736 * but not less than advertised window. 737 */ 738 win = sbspace(&so->so_rcv); 739 if (win < 0) 740 win = 0; 741 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 742 } 743 744 void 745 ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, 746 int32_t rstreason, int32_t tlen) 747 { 748 749 if (tp->t_inpcb) { 750 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 751 } 752 tcp_dropwithreset(m, th, tp, tlen, rstreason); 753 INP_WUNLOCK(tp->t_inpcb); 754 } 755 756 uint32_t 757 ctf_fixed_maxseg(struct tcpcb *tp) 758 { 759 int optlen; 760 761 if (tp->t_flags & TF_NOOPT) 762 return (tp->t_maxseg); 763 764 /* 765 * Here we have a simplified code from tcp_addoptions(), 766 * without a proper loop, and having most of paddings hardcoded. 767 * We only consider fixed options that we would send every 768 * time I.e. SACK is not considered. 769 * 770 */ 771 #define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4) 772 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 773 if (tp->t_flags & TF_RCVD_TSTMP) 774 optlen = TCPOLEN_TSTAMP_APPA; 775 else 776 optlen = 0; 777 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 778 if (tp->t_flags & TF_SIGNATURE) 779 optlen += PAD(TCPOLEN_SIGNATURE); 780 #endif 781 } else { 782 if (tp->t_flags & TF_REQ_TSTMP) 783 optlen = TCPOLEN_TSTAMP_APPA; 784 else 785 optlen = PAD(TCPOLEN_MAXSEG); 786 if (tp->t_flags & TF_REQ_SCALE) 787 optlen += PAD(TCPOLEN_WINDOW); 788 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 789 if (tp->t_flags & TF_SIGNATURE) 790 optlen += PAD(TCPOLEN_SIGNATURE); 791 #endif 792 if (tp->t_flags & TF_SACK_PERMIT) 793 optlen += PAD(TCPOLEN_SACK_PERMITTED); 794 } 795 #undef PAD 796 optlen = min(optlen, TCP_MAXOLEN); 797 return (tp->t_maxseg - optlen); 798 } 799 800 void 801 ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_blocks) 802 { 803 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 804 union tcp_log_stackspecific log; 805 struct timeval tv; 806 807 memset(&log, 0, sizeof(log)); 808 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 809 log.u_bbr.flex8 = num_sack_blks; 810 if (num_sack_blks > 0) { 811 log.u_bbr.flex1 = sack_blocks[0].start; 812 log.u_bbr.flex2 = sack_blocks[0].end; 813 } 814 if (num_sack_blks > 1) { 815 log.u_bbr.flex3 = sack_blocks[1].start; 816 log.u_bbr.flex4 = sack_blocks[1].end; 817 } 818 if (num_sack_blks > 2) { 819 log.u_bbr.flex5 = sack_blocks[2].start; 820 log.u_bbr.flex6 = sack_blocks[2].end; 821 } 822 if (num_sack_blks > 3) { 823 log.u_bbr.applimited = sack_blocks[3].start; 824 log.u_bbr.pkts_out = sack_blocks[3].end; 825 } 826 TCP_LOG_EVENTP(tp, NULL, 827 &tp->t_inpcb->inp_socket->so_rcv, 828 &tp->t_inpcb->inp_socket->so_snd, 829 TCP_SACK_FILTER_RES, 0, 830 0, &log, false, &tv); 831 } 832 } 833 834 uint32_t 835 ctf_decay_count(uint32_t count, uint32_t decay) 836 { 837 /* 838 * Given a count, decay it by a set percentage. The 839 * percentage is in thousands i.e. 100% = 1000, 840 * 19.3% = 193. 841 */ 842 uint64_t perc_count, decay_per; 843 uint32_t decayed_count; 844 if (decay > 1000) { 845 /* We don't raise it */ 846 return (count); 847 } 848 perc_count = count; 849 decay_per = decay; 850 perc_count *= decay_per; 851 perc_count /= 1000; 852 /* 853 * So now perc_count holds the 854 * count decay value. 855 */ 856 decayed_count = count - (uint32_t)perc_count; 857 return (decayed_count); 858 } 859