/*-
 * Copyright (c) 2016-9
 *	Netflix Inc.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * Author: Randall Stewart <rrs@netflix.com>
 * This work is based on the ACM Queue paper
 * BBR - Congestion Based Congestion Control
 * and also numerous discussions with Neal, Yuchung and Van.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/qmath.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/tree.h>
#ifdef NETFLIX_STATS
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/tim_filter.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <sys/kern_prefetch.h>

#include <net/route.h>
#include <net/vnet.h>
#include <net/ethernet.h>
#include <net/bpf.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_log_buf.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_fastopen.h>

#include <netipsec/ipsec_support.h>
#include <net/if.h>
#include <net/if_var.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "rack_bbr_common.h"

/*
 * Common TCP Functions - These are shared by both
 * rack and BBR.
 */
#ifdef KERN_TLS
uint32_t
ctf_get_opt_tls_size(struct socket *so, uint32_t rwnd)
{
	struct ktls_session *tls;
	uint32_t len;

again:
	tls = so->so_snd.sb_tls_info;
	len = tls->params.max_frame_len;	/* max tls payload */
	len += tls->params.tls_hlen;		/* tls header len */
	len += tls->params.tls_tlen;		/* tls trailer len */
	if ((len * 4) > rwnd) {
		/*
		 * Fewer than four records of this size fit in the peer's
		 * receive window.  Stroke a counter?  What else should we
		 * do, Drew?  From the TCP perspective it is not clear what
		 * the right action is, so for now shrink the record size
		 * and try again.
		 */
		if (tls->params.max_frame_len > 4096) {
			tls->params.max_frame_len -= 4096;
			if (tls->params.max_frame_len < 4096)
				tls->params.max_frame_len = 4096;
			goto again;
		}
	}
	return (len);
}
#endif
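
/*
 * Illustrative walk-through of the sizing rule above (a sketch, not
 * compiled code; the header/trailer total of 21 bytes is a made-up
 * round number, the real values come from the ktls session params):
 *
 *	rwnd = 32768, max_frame_len = 16384, hlen + tlen = 21
 *	len = 16384 + 21 = 16405, 4 * len = 65620 > 32768  -> shrink
 *	max_frame_len = 12288, len = 12309, 4 * len = 49236 > 32768 -> shrink
 *	max_frame_len = 8192,  len = 8213,  4 * len = 32852 > 32768 -> shrink
 *	max_frame_len = 4096,  len = 4117,  4 * len = 16468 <= 32768 -> done
 *
 * i.e. the TLS record size is stepped down in 4k units until at least
 * four full records (payload + header + trailer) fit inside the peer's
 * receive window, never going below a 4k payload.
 */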

/*
 * The function ctf_process_inbound_raw() is used by
 * transport developers to do the steps needed to
 * support MBUF queuing, i.e. the flags in
 * inp->inp_flags2:
 *
 * - INP_SUPPORTS_MBUFQ
 * - INP_MBUF_QUEUE_READY
 * - INP_DONT_SACK_QUEUE
 *
 * These flags help control how LRO will deliver
 * packets to the transport. You first set INP_SUPPORTS_MBUFQ
 * in inp_flags2 to tell the LRO code that you
 * will gladly take a queue of packets instead of a compressed
 * single packet. You also set, in your t_fb pointer, the
 * tfb_do_queued_segments entry to point at ctf_process_inbound_raw
 * (an illustrative setup sketch follows this comment).
 *
 * This then gets you lists of inbound ACKs/data instead
 * of a condensed, compressed ACK/DATA packet. Why would you
 * want that? It gives you access to the arrival
 * times recorded by LRO, and possibly by the hardware (if
 * the interface card supports that), for every ACK/DATA packet.
 * In some transport designs this is important, since knowing
 * the actual time we got the packet is useful information.
 *
 * Now there are some interesting caveats that the transport
 * designer needs to take into account when using this feature.
 *
 * 1) It is used with HPTS and pacing; when the pacing timer
 *    for output fires it will first call the input path.
 * 2) When you set INP_MBUF_QUEUE_READY you tell LRO to
 *    queue normal packets: you are busy pacing out data and
 *    will process the queued packets before your tfb_tcp_output
 *    call from pacing. If a non-normal packet arrives (e.g. a SACK)
 *    you will be awoken immediately.
 * 3) Finally you can add INP_DONT_SACK_QUEUE to not even
 *    be awoken if a SACK has arrived. You would do this when
 *    you are running not only a pacing-for-output timer
 *    but a rack timer as well, i.e. you know you are in recovery
 *    and are in the process (via the timers) of dealing with
 *    the loss.
 *
 * Now a critical thing you must be aware of here is that the
 * use of the flags has a far greater scope than just your
 * typical LRO. Why? Well, that is because in the normal compressed
 * LRO case, at the end of a driver interrupt all packets are going
 * to get presented to the transport, no matter if there is one
 * or 100. With the MBUF_QUEUE model, this is not true. You will
 * only be awoken to process the queue of packets when:
 * a) The flags discussed above allow it.
 * <or>
 * b) You exceed an ack or data limit (by default the
 *    ack limit is effectively infinite (64k acks) and the data
 *    limit is 64k of new TCP data)
 * <or>
 * c) The push bit has been set by the peer
 */
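
/*
 * Illustrative sketch of the opt-in described above (hypothetical
 * stack-init code, not part of this file; "my_stack_fb" and
 * "my_stack_init" are made-up names).  ctf_do_queued_segments(),
 * defined later in this file, is the thin wrapper that hands
 * tp->t_in_pkt to ctf_process_inbound_raw():
 *
 *	static struct tcp_function_block my_stack_fb = {
 *		...
 *		.tfb_do_queued_segments = ctf_do_queued_segments,
 *		...
 *	};
 *
 *	my_stack_init(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tp->t_inpcb;
 *
 *		inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
 *		...
 *	}
 *
 * and, while a pacing timer (and possibly a rack/recovery timer) is
 * armed, the stack would additionally set:
 *
 *		inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
 *		inp->inp_flags2 |= INP_DONT_SACK_QUEUE;   (rack timer only)
 *
 * clearing both again when the timer fires.
 */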

int
ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int has_pkt)
{
	/*
	 * We are passed a raw chain of mbuf packets
	 * that arrived in LRO. They are linked via
	 * the m_nextpkt link in the pkt-headers.
	 *
	 * We process each one by:
	 * a) saving off the next
	 * b) stripping off the ether-header
	 * c) formulating the arguments for tfb_tcp_hpts_do_segment
	 * d) calling tfb_tcp_hpts_do_segment for each mbuf,
	 *    after adjusting the time to match the arrival time.
	 * Note that the LRO code assures no IP options are present.
	 *
	 * The semantics for calling tfb_tcp_hpts_do_segment are the
	 * following:
	 * 1) It returns 0 if all went well and you (the caller) need
	 *    to release the lock.
	 * 2) If nxt_pkt is set, then the function will suppress calls
	 *    to tfb_tcp_output() since you are promising to call again
	 *    with another packet.
	 * 3) If it returns 1, then you must free all the packets being
	 *    shipped in; the tcb has been destroyed (or is about to be
	 *    destroyed).
	 */
	struct mbuf *m_save;
	struct ether_header *eh;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip = NULL;		/* Keep compiler happy. */
#endif
	struct ifnet *ifp;
	struct timeval tv;
	int32_t retval, nxt_pkt, tlen, off;
	uint16_t etype;
	uint16_t drop_hdrlen;
	uint8_t iptos, no_vn = 0, bpf_req = 0;

	NET_EPOCH_ASSERT();

	if (m && m->m_pkthdr.rcvif)
		ifp = m->m_pkthdr.rcvif;
	else
		ifp = NULL;
	if (ifp) {
		bpf_req = bpf_peers_present(ifp->if_bpf);
	} else {
		/*
		 * We probably should not work around this but
		 * KASSERT, since LRO always sets rcvif.
		 */
		no_vn = 1;
		goto skip_vnet;
	}
	CURVNET_SET(ifp->if_vnet);
skip_vnet:
	while (m) {
		m_save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Now let's get the ether header */
		eh = mtod(m, struct ether_header *);
		etype = ntohs(eh->ether_type);
		/* Let the BPF see the packet */
		if (bpf_req && ifp)
			ETHER_BPF_MTAP(ifp, m);
		/* Trim off the ethernet header */
		m_adj(m, sizeof(*eh));
		switch (etype) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
				m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
				if (m == NULL) {
					KMOD_TCPSTAT_INC(tcps_rcvshort);
					m_freem(m);
					goto skipped_pkt;
				}
			}
			ip6 = (struct ip6_hdr *)(eh + 1);
			th = (struct tcphdr *)(ip6 + 1);
			tlen = ntohs(ip6->ip6_plen);
			drop_hdrlen = sizeof(*ip6);
			if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
				if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
					th->th_sum = m->m_pkthdr.csum_data;
				else
					th->th_sum = in6_cksum_pseudo(ip6, tlen,
					    IPPROTO_TCP, m->m_pkthdr.csum_data);
				th->th_sum ^= 0xffff;
			} else
				th->th_sum = in6_cksum(m, IPPROTO_TCP, drop_hdrlen, tlen);
			if (th->th_sum) {
				KMOD_TCPSTAT_INC(tcps_rcvbadsum);
				m_freem(m);
				goto skipped_pkt;
			}
			/*
			 * Be proactive about an unspecified IPv6 source
			 * address.  As we use all-zero to indicate an
			 * unbound/unconnected pcb, an unspecified IPv6
			 * address can be used to confuse us.
			 *
			 * Note that packets with an unspecified IPv6
			 * destination are already dropped in ip6_input.
			 */
			if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
				/* XXX stat */
				m_freem(m);
				goto skipped_pkt;
			}
			iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			if (m->m_len < sizeof (struct tcpiphdr)) {
				if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
				    == NULL) {
					KMOD_TCPSTAT_INC(tcps_rcvshort);
					m_freem(m);
					goto skipped_pkt;
				}
			}
			ip = (struct ip *)(eh + 1);
			th = (struct tcphdr *)(ip + 1);
			drop_hdrlen = sizeof(*ip);
			iptos = ip->ip_tos;
			tlen = ntohs(ip->ip_len) - sizeof(struct ip);
			if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
				if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
					th->th_sum = m->m_pkthdr.csum_data;
				else
					th->th_sum = in_pseudo(ip->ip_src.s_addr,
					    ip->ip_dst.s_addr,
					    htonl(m->m_pkthdr.csum_data + tlen +
					    IPPROTO_TCP));
				th->th_sum ^= 0xffff;
			} else {
				int len;
				struct ipovly *ipov = (struct ipovly *)ip;

				/*
				 * Checksum extended TCP header and data.
				 */
				len = drop_hdrlen + tlen;
				bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
				ipov->ih_len = htons(tlen);
				th->th_sum = in_cksum(m, len);
				/* Reset length for SDT probes. */
				ip->ip_len = htons(len);
				/* Reset TOS bits */
				ip->ip_tos = iptos;
				/* Re-initialization for later version check */
				ip->ip_v = IPVERSION;
				ip->ip_hl = sizeof(*ip) >> 2;
			}
			if (th->th_sum) {
				KMOD_TCPSTAT_INC(tcps_rcvbadsum);
				m_freem(m);
				goto skipped_pkt;
			}
			break;
		}
#endif
		}
		/*
		 * Convert TCP protocol specific fields to host format.
		 */
		tcp_fields_to_host(th);

		off = th->th_off << 2;
		if (off < sizeof (struct tcphdr) || off > tlen) {
			KMOD_TCPSTAT_INC(tcps_rcvbadoff);
			m_freem(m);
			goto skipped_pkt;
		}
		tlen -= off;
		drop_hdrlen += off;
		/*
		 * Now let's set up the timeval to be when we should
		 * have been called (if we can).
		 */
		m->m_pkthdr.lro_nsegs = 1;
		if (m->m_flags & M_TSTMP_LRO) {
			tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
			tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000;
		} else {
			/* This should not happen; should we KASSERT instead? */
			tcp_get_usecs(&tv);
		}
		/* Now what about the next packet? */
		if (m_save || has_pkt)
			nxt_pkt = 1;
		else
			nxt_pkt = 0;
		retval = (*tp->t_fb->tfb_do_segment_nounlock)(m, th, so, tp, drop_hdrlen, tlen,
		    iptos, nxt_pkt, &tv);
		if (retval) {
			/* We lost the lock and probably the tcb too */
			m = m_save;
			while (m) {
				m_save = m->m_nextpkt;
				m->m_nextpkt = NULL;
				m_freem(m);
				m = m_save;
			}
			if (no_vn == 0)
				CURVNET_RESTORE();
			return (retval);
		}
skipped_pkt:
		m = m_save;
	}
	if (no_vn == 0)
		CURVNET_RESTORE();
	return (retval);
}

int
ctf_do_queued_segments(struct socket *so, struct tcpcb *tp, int have_pkt)
{
	struct mbuf *m;

	/* First let's see if we have old packets */
	if (tp->t_in_pkt) {
		m = tp->t_in_pkt;
		tp->t_in_pkt = NULL;
		tp->t_tail_pkt = NULL;
		if (ctf_process_inbound_raw(tp, so, m, have_pkt)) {
			/* We lost the tcpcb (maybe a RST came in)? */
			return (1);
		}
	}
	return (0);
}

uint32_t
ctf_outstanding(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una);
}

uint32_t
ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
{
	if (rc_sacked <= ctf_outstanding(tp))
		return (ctf_outstanding(tp) - rc_sacked);
	else {
		/* TSNH */
#ifdef INVARIANTS
		panic("tp:%p rc_sacked:%d > out:%d",
		    tp, rc_sacked, ctf_outstanding(tp));
#endif
		return (0);
	}
}

void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen)
{
	if (tp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(tp->t_inpcb);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
}

/*
 * ctf_drop_checks returns 1 when you should not proceed. It places
 * in ret_val what should be returned 1/0 by the caller. The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.  (A typical caller pattern is
 * sketched after the function body.)
 */
int
ctf_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val)
{
	int32_t todrop;
	int32_t thflags;
	int32_t tlen;

	thflags = *thf;
	tlen = *tlenp;
	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;
			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			KMOD_TCPSTAT_INC(tcps_rcvduppack);
			KMOD_TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
		} else {
			KMOD_TCPSTAT_INC(tcps_rcvpartduppack);
			KMOD_TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
		}
		/*
		 * DSACK - add SACK block for dropped range
		 */
		if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) {
			tcp_update_sack_list(tp, th->th_seq,
			    th->th_seq + todrop);
			/*
			 * ACK now, as the next in-sequence segment
			 * will clear the DSACK block again
			 */
			tp->t_flags |= TF_ACKNOW;
		}
		*drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}
	/*
	 * If segment ends after window, drop trailing data (and PUSH and
	 * FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
		if (todrop >= tlen) {
			KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments. Continue processing, but
			 * remember to ack. Otherwise, drop segment and
			 * ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				KMOD_TCPSTAT_INC(tcps_rcvwinprobe);
			} else {
				ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
				return (1);
			}
		} else
			KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH | TH_FIN);
	}
	*thf = thflags;
	*tlenp = tlen;
	return (0);
}
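
/*
 * Illustrative caller pattern for ctf_drop_checks() (a sketch of how a
 * stack's do-segment routine might consume the 1/ret_val convention
 * documented above; everything other than ctf_drop_checks() itself is
 * hypothetical):
 *
 *	int32_t ret_val = 0;
 *
 *	if (ctf_drop_checks(&to, m, th, tp, &tlen, &thflags,
 *	    &drop_hdrlen, &ret_val)) {
 *		// 1: segment handled; ret_val says whether the TCB
 *		//    (and its lock) went away with it.
 *		return (ret_val);
 *	}
 *	// 0: tp is still locked and valid, keep processing the
 *	//    (possibly trimmed) segment.
 */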

/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
void
ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val)
{
	/*
	 * Generate an ACK, dropping the incoming segment if it occupies
	 * sequence space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all paths to this
	 * code happen after packets containing RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the segment
	 * we received passes the SYN-RECEIVED ACK test. If it fails send a
	 * RST. This breaks the loop in the "LAND" DoS attack, and also
	 * prevents an ACK storm between two listening ports that have been
	 * sent forged SYN segments, each with the source address of the
	 * other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	    SEQ_GT(th->th_ack, tp->snd_max))) {
		*ret_val = 1;
		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
		return;
	} else
		*ret_val = 0;
	tp->t_flags |= TF_ACKNOW;
	if (m)
		m_freem(m);
}

void
ctf_do_drop(struct mbuf *m, struct tcpcb *tp)
{

	/*
	 * Drop space held by incoming segment and return.
	 */
	if (tp != NULL)
		INP_WUNLOCK(tp->t_inpcb);
	if (m)
		m_freem(m);
}

int
ctf_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp)
{
	/*
	 * RFC5961 Section 3.2
	 *
	 * - RST drops connection only if SEG.SEQ == RCV.NXT.
	 * - If RST is in window, we send challenge ACK.
	 *
	 * Note: to take into account delayed ACKs, we should test against
	 * last_ack_sent instead of rcv_nxt.
	 * Note 2: we handle the special case of a closed window, not
	 * covered by the RFC.
	 */
	int dropped = 0;

	if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
	    (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {

		KASSERT(tp->t_state != TCPS_SYN_SENT,
		    ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
		    __func__, th, tp));

		if (V_tcp_insecure_rst ||
		    (tp->last_ack_sent == th->th_seq) ||
		    (tp->rcv_nxt == th->th_seq) ||
		    ((tp->last_ack_sent - 1) == th->th_seq)) {
			KMOD_TCPSTAT_INC(tcps_drops);
			/* Drop the connection. */
			switch (tp->t_state) {
			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;
			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				so->so_error = ECONNRESET;
		close:
				tcp_state_change(tp, TCPS_CLOSED);
				/* FALLTHROUGH */
			default:
				tp = tcp_close(tp);
			}
			dropped = 1;
			ctf_do_drop(m, tp);
		} else {
			KMOD_TCPSTAT_INC(tcps_badrst);
			/* Send challenge ACK. */
			tcp_respond(tp, mtod(m, void *), th, m,
			    tp->rcv_nxt, tp->snd_nxt, TH_ACK);
			tp->last_ack_sent = tp->rcv_nxt;
		}
	} else {
		m_freem(m);
	}
	return (dropped);
}

/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
void
ctf_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ret_val)
{

	NET_EPOCH_ASSERT();

	KMOD_TCPSTAT_INC(tcps_badsyn);
	if (V_tcp_insecure_syn &&
	    SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
		tp = tcp_drop(tp, ECONNRESET);
		*ret_val = 1;
		ctf_do_drop(m, tp);
	} else {
		/* Send challenge ACK. */
		tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
		    tp->snd_nxt, TH_ACK);
		tp->last_ack_sent = tp->rcv_nxt;
		m = NULL;
		*ret_val = 0;
		ctf_do_drop(m, NULL);
	}
}

/*
 * ctf_ts_check returns 1 when you should not proceed and the state
 * machine should return. It places in ret_val what should
 * be returned 1/0 by the caller (hpts_do_segment). The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.
 */
int
ctf_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
    int32_t tlen, int32_t thflags, int32_t * ret_val)
{

	if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
		/*
		 * Invalidate ts_recent. If this segment updates ts_recent,
		 * the age will be reset later and ts_recent will get a
		 * valid value. If it does not, setting ts_recent to zero
		 * will at least satisfy the requirement that zero be placed
		 * in the timestamp echo reply when ts_recent isn't valid.
		 * The age isn't reset until we get a valid ts_recent
		 * because we don't want out-of-order segments to be dropped
		 * when ts_recent is old.
		 */
		tp->ts_recent = 0;
	} else {
		KMOD_TCPSTAT_INC(tcps_rcvduppack);
		KMOD_TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
		KMOD_TCPSTAT_INC(tcps_pawsdrop);
		*ret_val = 0;
		if (tlen) {
			ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
		} else {
			ctf_do_drop(m, NULL);
		}
		return (1);
	}
	return (0);
}

void
ctf_calc_rwin(struct socket *so, struct tcpcb *tp)
{
	int32_t win;

	/*
	 * Calculate amount of space in receive window, and then do TCP
	 * input processing. Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
}

void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen)
{

	if (tp->t_inpcb) {
		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
	}
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	INP_WUNLOCK(tp->t_inpcb);
}

uint32_t
ctf_fixed_maxseg(struct tcpcb *tp)
{
	int optlen;

	if (tp->t_flags & TF_NOOPT)
		return (tp->t_maxseg);

	/*
	 * Here we have a simplified version of the tcp_addoptions() code,
	 * without the proper loop and with most of the paddings hardcoded.
	 * We only consider fixed options that we would send on every
	 * segment, i.e. SACK is not considered.  (A worked example follows
	 * the function.)
	 */
#define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if (tp->t_flags & TF_RCVD_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = 0;
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
	} else {
		if (tp->t_flags & TF_REQ_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = PAD(TCPOLEN_MAXSEG);
		if (tp->t_flags & TF_REQ_SCALE)
			optlen += PAD(TCPOLEN_WINDOW);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if (tp->t_flags & TF_SACK_PERMIT)
			optlen += PAD(TCPOLEN_SACK_PERMITTED);
	}
#undef PAD
	optlen = min(optlen, TCP_MAXOLEN);
	return (tp->t_maxseg - optlen);
}
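
/*
 * Worked example for ctf_fixed_maxseg() (illustrative numbers only;
 * t_maxseg = 1460 is just the common Ethernet value):
 *
 *	Established connection, timestamps negotiated, no MD5 signature:
 *		optlen = TCPOLEN_TSTAMP_APPA = 12
 *		fixed maxseg = 1460 - 12 = 1448
 *
 *	Pre-established (SYN side) with TF_REQ_TSTMP, TF_REQ_SCALE and
 *	TF_SACK_PERMIT set:
 *		optlen = 12 + PAD(TCPOLEN_WINDOW = 3) +
 *		    PAD(TCPOLEN_SACK_PERMITTED = 2)
 *		       = 12 + 4 + 4 = 20
 *		fixed maxseg = 1460 - 20 = 1440
 *
 * The point of the PAD() rounding is that each option group is
 * accounted for as a multiple of 4 bytes, matching how tcp_addoptions()
 * lays the options out on the wire.
 */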

void
ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_blocks)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex8 = num_sack_blks;
		if (num_sack_blks > 0) {
			log.u_bbr.flex1 = sack_blocks[0].start;
			log.u_bbr.flex2 = sack_blocks[0].end;
		}
		if (num_sack_blks > 1) {
			log.u_bbr.flex3 = sack_blocks[1].start;
			log.u_bbr.flex4 = sack_blocks[1].end;
		}
		if (num_sack_blks > 2) {
			log.u_bbr.flex5 = sack_blocks[2].start;
			log.u_bbr.flex6 = sack_blocks[2].end;
		}
		if (num_sack_blks > 3) {
			log.u_bbr.applimited = sack_blocks[3].start;
			log.u_bbr.pkts_out = sack_blocks[3].end;
		}
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_SACK_FILTER_RES, 0,
		    0, &log, false, &tv);
	}
}

uint32_t
ctf_decay_count(uint32_t count, uint32_t decay)
{
	/*
	 * Given a count, decay it by a set percentage. The
	 * percentage is expressed in thousandths, i.e. 100% = 1000,
	 * 19.3% = 193.
	 */
	uint64_t perc_count, decay_per;
	uint32_t decayed_count;

	if (decay > 1000) {
		/* We don't raise it */
		return (count);
	}
	perc_count = count;
	decay_per = decay;
	perc_count *= decay_per;
	perc_count /= 1000;
	/*
	 * So now perc_count holds the
	 * count decay value.
	 */
	decayed_count = count - (uint32_t)perc_count;
	return (decayed_count);
}
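
/*
 * Worked example for ctf_decay_count(): with count = 1000 and
 * decay = 193 (i.e. 19.3%):
 *
 *	perc_count = 1000 * 193 / 1000 = 193
 *	decayed_count = 1000 - 193 = 807
 *
 * A decay argument above 1000 (more than 100%) is treated as a no-op
 * rather than letting the count grow or underflow.
 */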